/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to allocate a chunk only if we really
 * need one.
 *
 * CHUNK_ALLOC_FORCE means we must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to try to allocate one only if we have
 * very few chunks already allocated.  This is used as part of the
 * clustering code to help make sure we have a good pool of storage to
 * cluster in, without filling the FS with empty chunks.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_FORCE = 1,
	CHUNK_ALLOC_LIMITED = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

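/*
 * Return non-zero once the caching code has finished loading free space
 * information for this block group.
 */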
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

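/*
 * Return non-zero if the block group's flags include all of the requested
 * allocation bits.
 */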
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

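/*
 * Account for the superblock mirrors that fall inside this block group and
 * mark their stripes as excluded so they are never handed out as free space.
 */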
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

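/*
 * Return the caching control for this block group with an extra reference
 * held, or NULL if caching is not currently in progress.
 */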
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

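/*
 * Worker that slowly fills in the free space cache for one block group by
 * scanning the extent tree; any gap between extent items is free space.
 * Anyone waiting on partial results is woken roughly every 2MB of free
 * space found.
 */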
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

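/*
 * Start caching a block group.  Try the fast path first (loading the free
 * space cache saved on disk); if that is not possible, queue the block group
 * for the slow caching_thread above.
 */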
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking.  Also if we are currently trying to
	 * allocate blocks for the tree root we can't do the fast caching since
	 * we likely hold important locks.
	 */
	if (trans && (!trans->transaction->in_commit) &&
	    (root && root != root->fs_info->tree_root) &&
	    btrfs_test_opt(root, SPACE_CACHE)) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

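/*
 * Return the space_info that tracks the given allocation flags (data,
 * metadata or system), or NULL if none exists yet.
 */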
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

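/* return num scaled by factor tenths, i.e. num * factor / 10 */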
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

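/* return num scaled by factor hundredths, i.e. num * factor / 100 */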
static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);
			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs is generic, and
 * can be used in all cases the implicit back refs is used. The major
 * shortcoming of the full back refs is its overhead. Every time a tree
 * block gets COWed, we have to update back refs entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs is used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
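
/*
 * An illustrative example (the numbers are made up): a data extent that
 * inode 257 references at file offset 0 in the tree of subvolume 5 gets an
 * implicit back ref keyed
 *
 *     (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(5, 257, 0))
 *
 * while a full back ref for the same extent shared from a relocated leaf at
 * disk byte P is keyed (extent bytenr, BTRFS_SHARED_DATA_REF_KEY, P).
 */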

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
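/*
 * Convert a v0 extent item (from before the unified back ref format) in
 * place to the current format, extending the item and filling in the new
 * flags and tree block info fields.
 */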
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

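/*
 * Hash the (root, objectid, offset) triple that identifies an implicit data
 * back ref into the 64 bit key offset: the root goes into the high crc, the
 * owner and offset into the low crc.
 */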
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}
	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}
	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;
			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
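/*
 * Look up a keyed (non-inline) tree block back ref: shared refs are keyed by
 * the parent block, plain refs by the owner root objectid.
 */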
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

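/*
 * Pick the back ref key type from the owner (tree block vs data extent) and
 * whether the ref is shared (parent != 0).
 */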
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
1682
	}
1683 1684
	return ret;
}
Z
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size =  btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
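
/*
 * Illustrative note (not part of the original source): when the last count on
 * an inline ref is dropped above, the ref is sliced out of the extent item by
 * the memmove_extent_buffer()/btrfs_truncate_item() pair.  An item laid out as
 *
 *	[btrfs_extent_item][ref A][ref B][ref C]
 *
 * from which ref B is removed becomes
 *
 *	[btrfs_extent_item][ref A][ref C]
 *
 * with the item size shrunk by btrfs_extent_inline_ref_size(type).
 */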

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break;

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	return ret;
}
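
/*
 * Illustrative sketch (not part of the original source): a caller that wants
 * to trim a freed range and learn how much was actually discarded might do:
 *
 *	u64 trimmed = 0;
 *	int err = btrfs_discard_extent(root, bytenr, num_bytes, &trimmed);
 *
 * err stays 0 even when individual stripes lack discard support; trimmed only
 * counts the stripes whose discard succeeded.
 */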

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
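
/*
 * Illustrative sketch (not part of the original source): taking an extra
 * reference on a file data extent queues a delayed ref instead of touching
 * the extent tree directly.  inode_objectid and file_offset below are
 * hypothetical placeholders for the owning inode and its file offset:
 *
 *	ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes,
 *				   0, root->root_key.objectid,
 *				   inode_objectid, file_offset);
 *
 * The extent tree itself is only updated later, from btrfs_run_delayed_refs().
 */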

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents ref count from going down to zero when
	 * there still are pending delayed ref.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
				rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}

static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
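
/*
 * Illustrative sketch (not part of the original source): the transaction
 * commit path drains every queued update by passing (unsigned long)-1, while
 * lighter-weight callers can cap the amount of work:
 *
 *	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 */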

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
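
/*
 * Illustrative sketch (not part of the original source, and hedged): one
 * plausible use is flipping a tree block's extent to full-backref mode via
 * the delayed ref machinery:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 */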

static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
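
/*
 * Illustrative sketch (not part of the original source): before overwriting
 * an extent in place (nocow), a caller checks whether anyone else still
 * references it; file_offset and disk_bytenr are hypothetical placeholders:
 *
 *	ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
 *				    file_offset, disk_bytenr);
 *	if (ret)
 *		... fall back to COW ...
 *
 * A return of 0 means only this root/inode/offset references the extent.
 */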

static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
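
/*
 * Illustrative sketch (not part of the original source): after COWing a tree
 * block, callers typically bump the refs on everything the new copy points to
 * and drop the old block's refs:
 *
 *	ret = btrfs_inc_ref(trans, root, cow, 1);
 *	...
 *	ret = btrfs_dec_ref(trans, root, buf, 1);
 *
 * full_backref chooses whether the child refs hang off the parent block's
 * bytenr (shared refs) or the owning root's objectid.
 */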

static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
fail:
	if (ret)
		return ret;
	return 0;

}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;
	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}

static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching the
	 * block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up next
	 * time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path,
						      inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED) {
		/* We're not cached, don't bother trying to write stuff out */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	/*
	 * Just to make absolutely sure we have enough space, we're going to
	 * preallocate 12 pages worth of space for each block group.  In
	 * practice we ought to use at most 8, but we need extra space so we can
	 * add our header and have a terminator between the extents and the
	 * bitmaps.
	 */
	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
C
2898

J
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but just in case it can't
		 * hurt.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
				BTRFS_BLOCK_GROUP_SYSTEM |
				BTRFS_BLOCK_GROUP_METADATA);
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}

u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	u64 num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}

static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		flags |= root->fs_info->avail_data_alloc_bits &
			 root->fs_info->data_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		flags |= root->fs_info->avail_system_alloc_bits &
			 root->fs_info->system_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
		flags |= root->fs_info->avail_metadata_alloc_bits &
			 root->fs_info->metadata_alloc_profile;
	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(root, flags);
}

void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       BTRFS_BLOCK_GROUP_DATA);
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	if (root == root->fs_info->tree_root ||
	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
		alloc_chunk = 0;
		committed = 1;
	}

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}

		/*
		 * If we have less pinned bytes than we want to allocate then
		 * don't bother committing the transaction, it won't help us.
		 */
		if (data_sinfo->bytes_pinned < bytes)
			committed = 1;
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			committed = 1;
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	spin_unlock(&data_sinfo->lock);

	return 0;
}

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	spin_unlock(&data_sinfo->lock);
}
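
/*
 * Illustrative sketch (not part of the original source): the two helpers
 * above form a reserve/release pair around writes that may not complete;
 * write_bytes and copy_failed are hypothetical placeholders:
 *
 *	ret = btrfs_check_data_free_space(inode, write_bytes);
 *	if (ret)
 *		return ret;
 *	...
 *	if (copy_failed)
 *		btrfs_free_reserved_data_space(inode, write_bytes);
 */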

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, u64 alloc_bytes,
			      int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space.  Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	num_allocated += global_rsv->size;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	/*
	 * we have two similar checks here, one based on percentage
	 * and one based on a hard number of 256MB.  The idea
	 * is that if we have a good amount of free
	 * room, don't allocate a chunk.  A good amount is
	 * less than 80% utilized of the chunks we have allocated,
	 * or more than 256MB free
	 */
	if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
		return 0;

	if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
		return 0;

	thresh = btrfs_super_total_bytes(root->fs_info->super_copy);

	/* 256MB or 5% of the FS */
	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));

	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
		return 0;
	return 1;
}
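
/*
 * Worked example (not part of the original source, global reserve ignored):
 * on a 100GB filesystem whose data space_info holds 10GB of chunks, a 512MB
 * request under CHUNK_ALLOC_LIMITED behaves like this:
 *
 *	limited thresh = max(64MB, 1% of 100GB) = 1GB
 *	9.8GB used: 10GB - 9.8GB = 200MB < 1GB    -> allocate (return 1)
 *	5GB used:   5GB + 512MB + 256MB < 10GB    -> no new chunk (return 0)
 *
 * i.e. a new chunk is only cut once the existing ones are nearly full.
 */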

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

again:
	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		return 0;
	}

	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	if (ret < 0 && ret != -ENOSPC)
		goto out;

	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}

/*
 * shrink metadata reservation for delalloc
 */
static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
			   bool wait_ordered)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 reserved;
	u64 max_reclaim;
	u64 reclaimed = 0;
	long time_left;
	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
	int loops = 0;
	unsigned long progress;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	smp_mb();
	reserved = space_info->bytes_may_use;
	progress = space_info->reservation_progress;

	if (reserved == 0)
		return 0;

	smp_mb();
	if (root->fs_info->delalloc_bytes == 0) {
		if (trans)
			return 0;
		btrfs_wait_ordered_extents(root, 0, 0);
		return 0;
	}

	max_reclaim = min(reserved, to_reclaim);
	nr_pages = max_t(unsigned long, nr_pages,
			 max_reclaim >> PAGE_CACHE_SHIFT);
	while (loops < 1024) {
		/* have the flusher threads jump in and do some IO */
		smp_mb();
		nr_pages = min_t(unsigned long, nr_pages,
		       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
		writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);

		spin_lock(&space_info->lock);
		if (reserved > space_info->bytes_may_use)
			reclaimed += reserved - space_info->bytes_may_use;
		reserved = space_info->bytes_may_use;
		spin_unlock(&space_info->lock);

		loops++;

		if (reserved == 0 || reclaimed >= max_reclaim)
			break;

		if (trans && trans->transaction->blocked)
			return -EAGAIN;

		if (wait_ordered && !trans) {
			btrfs_wait_ordered_extents(root, 0, 0);
		} else {
			time_left = schedule_timeout_interruptible(1);

			/* We were interrupted, exit */
			if (time_left)
				break;
		}

		/* we've kicked the IO a few times, if anything has been freed,
		 * exit.  There is no sense in looping here for a long time
		 * when we really need to commit the transaction, or there are
		 * just too many writers without enough free space
		 */

		if (loops > 3) {
			smp_mb();
			if (progress != space_info->reservation_progress)
				break;
		}

	}

	return reclaimed >= to_reclaim;
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
				  struct btrfs_space_info *space_info,
				  u64 bytes, int force)
{
	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	if (force)
		goto commit;

	/* See if there is enough pinned space to make this reservation */
	spin_lock(&space_info->lock);
	if (space_info->bytes_pinned >= bytes) {
		spin_unlock(&space_info->lock);
		goto commit;
	}
	spin_unlock(&space_info->lock);

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (delayed_rsv->size < bytes) {
		spin_unlock(&delayed_rsv->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);

commit:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans, root);
}
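
/*
 * Illustrative call pattern (a sketch based on the reserve path below):
 * reserve_metadata_bytes() calls this with force == 1 when the amount
 * of pinned space suggests a commit will free space right away, and
 * with force == 0 as a last resort, in which case the pinned and
 * delayed-insertion checks above decide whether the commit is worth
 * the cost.
 */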

/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes, int flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 used;
	u64 num_bytes = orig_bytes;
	int retries = 0;
	int ret = 0;
	bool committed = false;
	bool flushing = false;
	bool wait_ordered = false;

again:
	ret = 0;
	spin_lock(&space_info->lock);
	/*
	 * We only want to wait if somebody other than us is flushing and we
	 * are actually allowed to flush.
	 */
	while (flush && !flushing && space_info->flush) {
		spin_unlock(&space_info->lock);
		/*
		 * If we have a trans handle we can't wait because the flusher
		 * may have to commit the transaction, which would mean we would
		 * deadlock since we are waiting for the flusher to finish, but
		 * hold the current transaction open.
		 */
		if (current->journal_info)
			return -EAGAIN;
		ret = wait_event_interruptible(space_info->wait,
					       !space_info->flush);
		/* Must have been interrupted, return */
		if (ret)
			return -EINTR;

		spin_lock(&space_info->lock);
	}

	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the block
	 * group then we can go ahead and save our reservation first and then
	 * start flushing if we need to.  Otherwise if we've already
	 * overcommitted, let's start flushing stuff first and then come back
	 * and try to make our reservation.
	 */
	if (used <= space_info->total_bytes) {
		if (used + orig_bytes <= space_info->total_bytes) {
			space_info->bytes_may_use += orig_bytes;
			ret = 0;
		} else {
			/*
			 * Ok set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim what
			 * we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok we're over committed, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		wait_ordered = true;
		num_bytes = used - space_info->total_bytes +
			(orig_bytes * (retries + 1));
	}

	if (ret) {
		u64 profile = btrfs_get_alloc_profile(root, 0);
		u64 avail;

		/*
		 * If we have a lot of space that's pinned, don't bother doing
		 * the overcommit dance yet and just commit the transaction.
		 */
		avail = (space_info->total_bytes - space_info->bytes_used) * 8;
		do_div(avail, 10);
		if (space_info->bytes_pinned >= avail && flush && !committed) {
			space_info->flush = 1;
			flushing = true;
			spin_unlock(&space_info->lock);
			ret = may_commit_transaction(root, space_info,
						     orig_bytes, 1);
			if (ret)
				goto out;
			committed = true;
			goto again;
		}

		spin_lock(&root->fs_info->free_chunk_lock);
		avail = root->fs_info->free_chunk_space;

		/*
		 * If we have dup, raid1 or raid10 then only half of the free
		 * space is actually usable.
		 */
		if (profile & (BTRFS_BLOCK_GROUP_DUP |
			       BTRFS_BLOCK_GROUP_RAID1 |
			       BTRFS_BLOCK_GROUP_RAID10))
			avail >>= 1;

		/*
		 * If we aren't flushing don't let us overcommit too much, say
		 * 1/8th of the space.  If we can flush, let it overcommit up to
		 * 1/2 of the space.
		 */
		if (flush)
			avail >>= 3;
		else
			avail >>= 1;
		spin_unlock(&root->fs_info->free_chunk_lock);

		if (used + num_bytes < space_info->total_bytes + avail) {
			space_info->bytes_may_use += orig_bytes;
			ret = 0;
		} else {
			wait_ordered = true;
		}
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 */
	if (ret && flush) {
		flushing = true;
		space_info->flush = 1;
	}

	spin_unlock(&space_info->lock);

	if (!ret || !flush)
		goto out;

	/*
	 * We do synchronous shrinking since we don't actually unreserve
	 * metadata until after the IO is completed.
	 */
	ret = shrink_delalloc(root, num_bytes, wait_ordered);
	if (ret < 0)
		goto out;

	ret = 0;

	/*
	 * So if we were overcommitted it's possible that somebody else flushed
	 * out enough space and we simply didn't have enough space to reclaim,
	 * so go back around and try again.
	 */
	if (retries < 2) {
		wait_ordered = true;
		retries++;
		goto again;
	}

	ret = -ENOSPC;
	if (committed)
		goto out;

	ret = may_commit_transaction(root, space_info, orig_bytes, 0);
	if (!ret) {
		committed = true;
		goto again;
	}

out:
	if (flushing) {
		spin_lock(&space_info->lock);
		space_info->flush = 0;
		wake_up_all(&space_info->wait);
		spin_unlock(&space_info->lock);
	}
	return ret;
}

static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (root->ref_cows || root == root->fs_info->csum_root)
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;
	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}
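
/*
 * Small worked example of the accounting above (illustrative numbers):
 * with size == 8M and reserved == 8M (full == 1), block_rsv_use_bytes()
 * for 2M drops reserved to 6M and clears full.  A later
 * block_rsv_add_bytes() of 2M with update_size == 0 refills reserved to
 * 8M and sets full again, while update_size == 1 would instead grow
 * size to 10M and leave the rsv unfilled.
 */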

static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_may_use -= num_bytes;
			space_info->reservation_progress++;
			spin_unlock(&space_info->lock);
		}
	}
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}

static inline int __block_rsv_add(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 num_bytes, int flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv,
			u64 num_bytes)
{
	return __block_rsv_add(root, block_rsv, num_bytes, 1);
}

int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
				struct btrfs_block_rsv *block_rsv,
				u64 num_bytes)
{
	return __block_rsv_add(root, block_rsv, num_bytes, 0);
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}
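
/*
 * Note on min_factor (a sketch, assuming the div_factor() helper that
 * computes size * factor / 10): a caller passing min_factor == 5 gets 0
 * only while at least half of the rsv's size is still reserved, which
 * makes this a cheap "is the rsv more than half drained?" check that
 * never reserves anything new.
 */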

static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
					   struct btrfs_block_rsv *block_rsv,
					   u64 min_reserved, int flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv,
			   u64 min_reserved)
{
	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
}

int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
				   struct btrfs_block_rsv *block_rsv,
				   u64 min_reserved)
{
	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
}

/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}
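
/*
 * Rough worked example of the sizing above (illustrative, assuming 4K
 * blocks and 4-byte crc32c checksums): with 100GiB of data and 1GiB of
 * metadata in use, the csum term is (100GiB >> 12) * 4 * 2, about
 * 200MiB, plus 2% of 101GiB, about 2GiB.  Since three times that total
 * exceeds the metadata in use, the result is capped at 1GiB / 3, about
 * 341MiB, before being aligned up to a multiple of leafsize << 10.
 */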

static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&block_rsv->lock);
	spin_lock(&sinfo->lock);

	block_rsv->size = num_bytes;

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		sinfo->reservation_progress++;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&sinfo->lock);
	spin_unlock(&block_rsv->lock);
}

static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->bytes_reserved)
		return;

	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
	/*
	 * two for root back/forward refs, two for directory entries
	 * and one for root of the snapshot.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
	dst_rsv->space_info = src_rsv->space_info;
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number of
 * reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    BTRFS_I(inode)->delalloc_meta_reserved) {
		drop_inode_space = 1;
		BTRFS_I(inode)->delalloc_meta_reserved = 0;
	}

	/*
	 * If we have the same number of outstanding extents as we have
	 * reserved, or more, then we need to leave the reserved extents
	 * count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}

/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure out
 * how many checksums will be required.  If we are adding bytes then the number
 * may go up and we will return the number of additional bytes that must be
 * reserved.  If it is going down we will return the number of bytes that must
 * be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}
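
/*
 * Rough worked example (illustrative, assuming 4K sectors and 4K
 * leaves): the usable leaf space is a little under 4K and each checksum
 * is charged sizeof(struct btrfs_csum_item) + sizeof(struct
 * btrfs_disk_key), about 18 bytes, so num_csums_per_leaf comes out
 * around 220.  One leaf then covers roughly 220 * 4K of checksummed
 * data, and a reservation only grows when csum_bytes crosses one of
 * those ~880K boundaries.
 */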

int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	int flush = 1;
	int ret;

	/* Need to be holding the i_mutex here if we aren't free space cache */
	if (btrfs_is_free_space_inode(root, inode))
		flush = 0;
	else
		WARN_ON(!mutex_is_locked(&inode->i_mutex));

	if (flush && btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (ret) {
		u64 to_free = 0;
		unsigned dropped;

		spin_lock(&BTRFS_I(inode)->lock);
		dropped = drop_outstanding_extent(inode);
		/*
		 * If the inode's csum_bytes is the same as the original
		 * csum_bytes then we know we haven't raced with any free()ers
		 * so we can just reduce our inode's csum bytes and carry on.
		 * Otherwise we have to do the normal free thing to account for
		 * the case that the free side didn't free up its reserve
		 * because of this outstanding reservation.
		 */
		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
			calc_csum_metadata_size(inode, num_bytes, 0);
		else
			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
		spin_unlock(&BTRFS_I(inode)->lock);
		if (dropped)
			to_free += btrfs_calc_trans_metadata_size(root, dropped);

		if (to_free)
			btrfs_block_rsv_release(root, block_rsv, to_free);
		return ret;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		BTRFS_I(inode)->delalloc_meta_reserved = 1;
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;
}
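
/*
 * Sketch of the intended pairing (based on the helpers in this file): a
 * writer calls btrfs_delalloc_reserve_metadata() before dirtying pages
 * and btrfs_delalloc_release_metadata() once the IO completes or fails,
 * so every bump of outstanding_extents and csum_bytes made above is
 * eventually undone by drop_outstanding_extent() and
 * calc_csum_metadata_size(..., 0).
 */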

/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on number of outstanding
 *   extents and how much csums will be needed
 * o add to the inode's ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, for example if there is an error or we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, trans, NULL, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache);

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache);

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, trans, root, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return 0;
}

/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @reserve:	One of the reservation enums
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk.  For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;
	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				BUG_ON(space_info->bytes_may_use < num_bytes);
				space_info->bytes_may_use -= num_bytes;
			}
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;
		space_info->reservation_progress++;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
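
/*
 * Illustrative lifecycle of a metadata allocation under this scheme:
 * reserve_metadata_bytes() bumps bytes_may_use, the allocator calls
 * this helper with RESERVE_ALLOC to move the bytes from bytes_may_use
 * to bytes_reserved, and update_block_group() finally moves them from
 * bytes_reserved to bytes_used once the extent is recorded.
 */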

int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
	return 0;
}

static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 len;

	while (start <= end) {
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache);
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		cache->space_info->bytes_pinned -= len;
		if (cache->ro)
			cache->space_info->bytes_readonly += len;
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return 0;
}

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			BUG_ON(ret);
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			BUG_ON(ret);
			extent_slot = path->slots[0];
		}
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu  owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		BUG_ON(ret < 0);

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		BUG_ON(ret);
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			BUG_ON(ret);
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			BUG_ON(ret);
		} else {
			invalidate_mapping_pages(info->btree_inode->i_mapping,
			     bytenr >> PAGE_CACHE_SHIFT,
			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		kfree(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
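
/*
 * Note on the return value (as used by btrfs_free_tree_block() below):
 * 1 means the ref head was dropped and must_insert_reserved was set,
 * i.e. the extent never made it into the extent tree and the caller may
 * free the space directly; 0 means a delayed ref still owns the
 * cleanup.
 */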

void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
						parent, root->root_key.objectid,
						btrfs_header_level(buf),
						BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
	}
out:
	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}

int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner,
					offset, BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}
	return ret;
}

static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
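
/*
 * stripe_align() rounds val up to the next stripesize boundary with the
 * usual power-of-two mask trick; e.g. with stripesize == 4096, val ==
 * 5000 becomes 8192 and an already aligned 8192 stays 8192.
 */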

/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}

static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	int index;
	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
		index = 0;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
		index = 1;
	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
		index = 2;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
		index = 3;
	else
		index = 4;
	return index;
}

enum btrfs_loop_type {
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};

5107 5108 5109 5110
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
5111
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
5112 5113 5114
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
C
Chris Mason 已提交
5115
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5116 5117 5118 5119
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
5120
				     u64 data)
5121
{
5122
	int ret = 0;
C
Chris Mason 已提交
5123
	struct btrfs_root *root = orig_root->fs_info->extent_root;
5124
	struct btrfs_free_cluster *last_ptr = NULL;
5125
	struct btrfs_block_group_cache *block_group = NULL;
5126
	struct btrfs_block_group_cache *used_block_group;
5127
	int empty_cluster = 2 * 1024 * 1024;
C
Chris Mason 已提交
5128
	int allowed_chunk_alloc = 0;
5129
	int done_chunk_alloc = 0;
5130
	struct btrfs_space_info *space_info;
5131
	int loop = 0;
5132
	int index = 0;
5133 5134
	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
J
Josef Bacik 已提交
5135
	bool found_uncached_bg = false;
5136
	bool failed_cluster_refill = false;
5137
	bool failed_alloc = false;
5138
	bool use_cluster = true;
5139
	bool have_caching_bg = false;
5140 5141
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;
5142

5143
	WARN_ON(num_bytes < root->sectorsize);
5144
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5145 5146
	ins->objectid = 0;
	ins->offset = 0;
5147

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %llu\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
ideal_cache:
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		used_block_group = block_group;
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		used_block_group = block_group;
		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, data)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((data & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			u64 free_percent;

			found_uncached_bg = true;
			ret = cache_block_group(block_group, trans,
						orig_root, 1);
			if (block_group->cached == BTRFS_CACHE_FINISHED)
				goto alloc;

			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * The caching workers are limited to 2 threads, so we
			 * can queue as much work as we care to.
			 */
			if (loop > LOOP_FIND_IDEAL) {
				ret = cache_block_group(block_group, trans,
							orig_root, 0);
				BUG_ON(ret);
			}

			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}

alloc:
		if (unlikely(block_group->ro))
			goto loop;

		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * lets look there
		 */
		if (last_ptr) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			used_block_group = last_ptr->block_group;
			if (used_block_group != block_group &&
			    (!used_block_group ||
			     used_block_group->ro ||
			     !block_group_bits(used_block_group, data))) {
				used_block_group = block_group;
				goto refill_cluster;
			}

			if (used_block_group != block_group)
				btrfs_get_block_group(used_block_group);

			offset = btrfs_alloc_from_cluster(used_block_group,
			  last_ptr, num_bytes, used_block_group->key.objectid);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
			if (used_block_group != block_group) {
				btrfs_put_block_group(used_block_group);
				used_block_group = block_group;
			}
refill_cluster:
			BUG_ON(used_block_group != block_group);
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so lets just skip it
			 * and let the allocator find whatever block
			 * it can find.  If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.  */
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       search_start, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

unclustered_alloc:
		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);
		/* move on to the next group */
		if (search_start + num_bytes >= search_end) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* move on to the next group */
		if (search_start + num_bytes >
		    used_block_group->key.objectid + used_block_group->key.offset) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
						  alloc_type);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, lets return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			loop++;
			if (!ideal_cache_percent)
				goto search;

			/*
			 * One of the following two things has happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find an uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		loop++;

		if (loop == LOOP_ALLOC_CHUNK) {
			if (allowed_chunk_alloc) {
				ret = do_chunk_alloc(trans, root, num_bytes +
						     2 * 1024 * 1024, data,
						     CHUNK_ALLOC_LIMITED);
				allowed_chunk_alloc = 0;
				if (ret == 1)
					done_chunk_alloc = 1;
			} else if (!done_chunk_alloc &&
				   space_info->force_alloc ==
				   CHUNK_ALLOC_NO_FORCE) {
				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
			}

			/*
			 * We didn't allocate a chunk, go ahead and drop the
			 * empty size and loop again.
			 */
			if (!done_chunk_alloc)
				loop = LOOP_NO_EMPTY_SIZE;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		ret = 0;
	}

	return ret;
}
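
/*
 * A minimal sketch of the caller side of the contract documented above
 * find_free_extent(): on success, [ins->objectid, ins->objectid +
 * ins->offset) is the reserved range.  The wrapper below is hypothetical.
 */
#if 0
static int example_find_hole(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_key ins;
	int ret;

	ret = find_free_extent(trans, root, num_bytes, 0 /* empty_size */,
			       0 /* search_start */, (u64)-1 /* search_end */,
			       0 /* hint_byte */, &ins,
			       BTRFS_BLOCK_GROUP_DATA);
	if (ret)
		return ret;		/* typically -ENOSPC */

	/* ins.objectid is the hole start, ins.offset its length */
	return 0;
}
#endif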

static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
	       (unsigned long long)info->flags,
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	u64 search_start = 0;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows)
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data,
				     CHUNK_ALLOC_NO_FORCE);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte,
			       ins, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, CHUNK_ALLOC_FORCE);
		goto again;
	}
	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes, 1);
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);

	return ret;
}
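
/*
 * A worked example of the -ENOSPC fallback above, assuming a 4K sectorsize
 * and a hypothetical helper name: a failed 12K request is halved to 6K,
 * masked down to a sector boundary (4K), clamped to min_alloc_size, and
 * then retried.
 */
#if 0
static u64 example_shrink_request(u64 num_bytes, u64 min_alloc_size,
				  u32 sectorsize)
{
	num_bytes = num_bytes >> 1;			/* 12K -> 6K */
	num_bytes = num_bytes & ~((u64)sectorsize - 1);	/* 6K  -> 4K */
	num_bytes = max(num_bytes, min_alloc_size);
	return num_bytes;
}
#endif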

static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len, int pin)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);

	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
	}
	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 0);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 1);
}

static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}

static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}

int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
					 0, root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group, trans, NULL, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT);
	BUG_ON(ret);
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}
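
/*
 * A minimal sketch of the three caching_ctl->progress cases the function
 * above distinguishes; the helper is hypothetical and only mirrors the
 * arithmetic.  With progress at 100M: an extent at 120M is entirely
 * unscanned (exclude it all), one at 80M is entirely scanned (remove it
 * from the free space cache), and one straddling 99M..101M is split at
 * the boundary.
 */
#if 0
static void example_split_logged_extent(u64 progress, u64 start, u64 len,
					u64 *remove_len, u64 *exclude_len)
{
	if (start >= progress) {
		*remove_len = 0;
		*exclude_len = len;
	} else if (start + len <= progress) {
		*remove_len = len;
		*exclude_len = 0;
	} else {
		*remove_len = progress - start;		/* scanned part   */
		*exclude_len = start + len - progress;	/* unscanned part */
	}
}
#endif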

struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
			 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}

static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;

	block_rsv = get_block_rsv(trans, root);

	if (block_rsv->size == 0) {
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		/*
		 * If we couldn't reserve metadata bytes try and use some from
		 * the global reserve.
		 */
		if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
			return ERR_PTR(ret);
		} else if (ret) {
			return ERR_PTR(ret);
		}
		return block_rsv;
	}

	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;
	if (ret) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL,
				/*DEFAULT_RATELIMIT_BURST*/ 2);
		if (__ratelimit(&_rs)) {
			printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
			WARN_ON(1);
		}
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		if (!ret) {
			return block_rsv;
		} else if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
		}
	}

	return ERR_PTR(-ENOSPC);
}

static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(block_rsv, NULL, 0);
}
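
/*
 * A minimal sketch of how the two helpers above pair up in a caller such
 * as btrfs_alloc_free_block() below: reserve bytes for one tree block up
 * front and hand them back if the allocation never happens.  The wrapper
 * is hypothetical.
 */
#if 0
static int example_rsv_cycle(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *rsv;

	rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(rsv))
		return PTR_ERR(rsv);

	/* ... try to allocate the block; on failure, undo the charge ... */
	unuse_block_rsv(rsv, blocksize);
	return 0;
}
#endif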

/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
				   empty_size, hint, (u64)-1, &ins, 0);
	if (ret) {
		unuse_block_rsv(block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf));

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op);
		BUG_ON(ret);
	}
	return buf;
}

struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2

static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}

/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret);
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret);
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret);
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret);
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	BUG_ON(ret);
	BUG_ON(wc->refs[level - 1] == 0);
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next)
			return -EIO;
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
					root->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret);
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;

static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
6480
	int lookup_info = 1;
6481 6482 6483
	int ret;

	while (level >= 0) {
6484
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
6485 6486 6487 6488 6489 6490
		if (ret > 0)
			break;

		if (level == 0)
			break;

6491 6492 6493 6494
		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

6495
		ret = do_walk_down(trans, root, path, wc, &lookup_info);
Y
Yan, Zheng 已提交
6496 6497 6498
		if (ret > 0) {
			path->slots[level]++;
			continue;
6499 6500
		} else if (ret < 0)
			return ret;
Y
Yan, Zheng 已提交
6501
		level = wc->level;
Y
Yan Zheng 已提交
6502 6503 6504 6505
	}
	return 0;
}

static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
void btrfs_drop_snapshot(struct btrfs_root *root,
			 struct btrfs_block_rsv *block_rsv, int update_ref)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	BUG_ON(IS_ERR(trans));

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_free;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						path->nodes[level]->len,
						&wc->refs[level],
						&wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root)) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			BUG_ON(ret);

			btrfs_end_transaction_throttle(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 0);
			BUG_ON(IS_ERR(trans));
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	BUG_ON(err);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
					   NULL, NULL);
		BUG_ON(ret < 0);
		if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (root->in_radix) {
		btrfs_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		kfree(root);
	}
out_free:
	btrfs_end_transaction_throttle(trans, tree_root);
	kfree(wc);
	btrfs_free_path(path);
out:
	if (err)
		btrfs_std_error(root->fs_info, err);
	return;
}

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}

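/*
 * mark a block group read only, moving its unused bytes into the
 * space_info's bytes_readonly.  fails with -ENOSPC when taking that
 * space away would leave the space_info overcommitted.  unless 'force'
 * is set, a small cushion is kept for metadata and system chunks.
 */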
static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases, so keep a small
	 * cushion unless we're forced to set the group readonly.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = 1 * 1024 * 1024;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro = 1;
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}

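/*
 * mark a block group read only so its extents can be relocated.  if
 * the device count no longer supports the group's raid profile, force
 * a chunk allocation in the adjusted profile first; if the read only
 * flip still fails for lack of space, allocate one more chunk and
 * retry.
 */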
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root);
	BUG_ON(IS_ERR(trans));

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags)
		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			       CHUNK_ALLOC_FORCE);

	ret = set_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache, 0);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}

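/* force the allocation of a new chunk with the given type's profile */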
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups
 * in the list.  takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}

/*
 * helper to account the unused space of all the readonly block groups
 * in the space_info.  takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}

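/*
 * undo set_block_group_ro: subtract the group's unused bytes from the
 * space_info's bytes_readonly and clear the ro flag.
 */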
int btrfs_set_block_group_rw(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return 0;
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	int index;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  However, if we
	 * were marked as full, then we know there aren't enough chunks, and we
	 * can just return.
	 */
	ret = -1;
	if (full)
		goto out;

	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	index = get_block_group_index(block_group);
	if (index == 0) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == 1) {
		dev_min = 2;
	} else if (index == 2) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == 3) {
		dev_min = fs_devices->rw_devices;
		do_div(min_free, dev_min);
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(NULL, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}

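/*
 * position 'path' at the first block group item with an objectid at or
 * after key->objectid.  returns 0 if one is found, > 0 if the tree ran
 * out of items, < 0 on error.
 */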
static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

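/*
 * drop the inode references the block groups hold on their free space
 * cache files.  used at unmount, before the block groups themselves
 * are torn down.
 */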
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}

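/*
 * free every block group and space_info struct.  only called in the
 * final stages of unmount, so nothing can race with us here.
 */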
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0 ||
		    space_info->bytes_may_use > 0) {
			WARN_ON(1);
			dump_space_info(space_info, 0, 0);
		}
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}

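/* add the block group to its space_info's list for its raid index */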
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}

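/*
 * build the in memory block group cache from the block group items in
 * the extent tree.  on a fs with mirrored profiles, any un-mirrored
 * block groups are marked read only so new allocations come from
 * mirrored chunks.
 */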
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
						GFP_NOFS);
		if (!cache->free_space_ctl) {
			kfree(cache);
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		btrfs_init_free_space_ctl(cache);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		exclude_super_stripes(root, cache);

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache, 1);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache, 1);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

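/*
 * create the in memory state and the on disk item for a newly
 * allocated chunk.  the group starts out fully cached since everything
 * except the super mirror stripes is known to be free space.
 */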
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return -ENOMEM;
	}

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_init_free_space_ctl(cache);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}

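/*
 * remove an empty, read only block group: orphan its free space cache
 * inode, delete the free space and block group items, and drop the
 * group from the in memory caches and space accounting.
 */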
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(tree_root, block_group, path);
	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		BUG_ON(ret);
		clear_nlink(inode);
		/* One for the block group's ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

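/*
 * create the basic space_info entries: system, plus either separate
 * metadata and data entries, or one mixed entry when the MIXED_GROUPS
 * incompat bit is set.
 */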
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}

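/* entry points used by the error handling code to clean up extents */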
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}

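/*
 * discard the free space in every block group overlapping the given
 * fstrim range, caching uncached groups first so their free space is
 * known.  range->len is set to the total number of bytes trimmed.
 */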
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	int ret = 0;

	cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
				cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, NULL, root, 0);
				if (!ret)
					wait_block_group_cache_done(cache);
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}