// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

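/*
 * Checksum helpers: metadata and superblock checksums are CRC32C, seeded
 * with ~0 by the callers; btrfs_csum_final() inverts the result and stores
 * it little-endian into the result buffer.
 */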
u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * Compute the csum of a btree block and store the result to provided buffer.
 *
 * Returns error if the extent buffer cannot be mapped.
 */
static int csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		/*
		 * Note: we don't need to check for the err == 1 case here, as
		 * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
		 * and 'min_len = 32' and the currently implemented mapping
		 * algorithm we cannot cross a page boundary.
		 */
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (WARN_ON(err))
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	memset(result, 0, BTRFS_CSUM_SIZE);

	btrfs_csum_final(crc, result);

	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_read(eb);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		char result[sizeof(crc)];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, sizeof(result)))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
				csum_type);
		ret = 1;
	}

	return ret;
}

int btrfs_verify_level_key(struct extent_buffer *eb, int level,
			   struct btrfs_key *first_key, u64 parent_transid)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree level check failed\n");
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid races, which is impossible
	 * here.  So we only check tree blocks that are read from disk, whose
	 * generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;
	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

	if (ret) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree first key check failed\n");
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				ret = -EIO;
			else if (btrfs_verify_level_key(eb, level,
						first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	u8 result[BTRFS_CSUM_SIZE];
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	if (csum_tree_block(eb, result))
		return -EINVAL;

	write_extent_buffer(eb, result, 0, csum_size);
	return 0;
}

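/*
 * Does the fsid in the eb header match the fsid (or metadata_uuid) of the
 * mounted fs or one of its seed devices?  Returns 0 on a match and 1
 * otherwise.
 */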
static int check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		u8 *metadata_uuid;

		/*
		 * Checking the incompat flag is only valid for the current
		 * fs. For seed devices it's forbidden to have their uuid
		 * changed so reading ->fsid in this case is fine
		 */
		if (fs_devices == fs_info->fs_devices &&
		    btrfs_fs_incompat(fs_info, METADATA_UUID))
			metadata_uuid = fs_devices->metadata_uuid;
		else
			metadata_uuid = fs_devices->fsid;

		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int ret = 0;
	u8 result[BTRFS_CSUM_SIZE];
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(eb, result);
	if (ret)
		goto err;

	if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
		u32 val;
		u32 found = 0;

		memcpy(&found, result, csum_size);

		read_extent_buffer(eb, &val, 0, csum_size);
		btrfs_warn_rl(fs_info,
		"%s checksum verify failed on %llu wanted %x found %x level %d",
			      fs_info->sb->s_id, eb->start,
			      val, found, btrfs_header_level(eb));
		ret = -EUCLEAN;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
	else
		btrfs_err(fs_info,
			  "block=%llu read time tree block corruption detected",
			  eb->start);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

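/*
 * bio end_io for IO that needs task context: record the bio status and
 * punt the final completion to the workqueue that matches the IO type,
 * where end_workqueue_fn() will run it.
 */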
static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct  async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.   All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	struct inode *inode;
	blk_status_t ret;

	async = container_of(work, struct  async_submit_bio, work);
	inode = async->private_data;

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
			async->mirror_num, 1);
	if (ret) {
		async->bio->bi_status = ret;
		bio_endio(async->bio);
	}
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct  async_submit_bio, work);
	kfree(async);
}

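/*
 * Submit a bio through the async helpers: run_one_async_start() does the
 * checksumming on a worker thread and run_one_async_done() maps and
 * submits the bio once that is finished.
 */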
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i, iter_all) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}

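/*
 * Decide whether a btree write should be checksummed inline: writes stay
 * synchronous when other sync writers are active or when the CPU has
 * hardware CRC32C (SSE4.2), otherwise they are offloaded to the workers.
 */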
static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

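/*
 * Fire-and-forget readahead of a single tree block: the read is started
 * with WAIT_NONE and the buffer reference is dropped right away (marked
 * stale if the submission failed).
 */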
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;

	ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf,
			WAIT_NONE, 0);
	if (ret < 0)
		free_extent_buffer_stale(buf);
	else
		free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       mirror_num);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer_stale(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

/*
 * Read tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

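/*
 * If @buf was dirtied in the currently running transaction, undo the
 * dirty-metadata accounting and clear its dirty bits so it won't be
 * written back.
 */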
void btrfs_clean_tree_block(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking_write(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

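/*
 * A btrfs_subvolume_writers is a percpu counter plus a waitqueue, used to
 * track and wait out in-flight writers on a subvolume.
 */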
static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

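/*
 * Initialize a freshly allocated btrfs_root to a known-empty state: locks,
 * lists, counters and the dirty-log extent tree (the latter is skipped for
 * the dummy fs_info used by the selftests).
 */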
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

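/*
 * Create a new tree root: allocate an empty leaf, fill in the root item
 * and insert it into the tree of tree roots under @objectid.
 */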
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

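/*
 * Read a root from the tree of tree roots: look up the root item by @key,
 * read the tree block it points at and wire up ->node and ->commit_root.
 */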
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible for calling btrfs_free_fs_root */
	return ret;
}

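/*
 * fs roots are cached in a radix tree indexed by root objectid and
 * protected by fs_roots_radix_lock.
 */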
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

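/*
 * Look up a root by @location: the global roots are returned directly,
 * anything else is taken from the radix tree cache or read from disk,
 * initialized and inserted (an -EEXIST from a racing insert retries the
 * lookup).
 */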
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

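/*
 * Background cleaner thread: runs delayed iputs, deletes one dead
 * snapshot per iteration, kicks the inode defragger and removes unused
 * block groups, then sleeps until it is woken again.
 */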
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

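/*
 * Background transaction commit thread: periodically commits the running
 * transaction once it is old enough (fs_info->commit_interval), and wakes
 * the cleaner on every iteration.
 */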
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = ktime_get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
				(!btrfs_transaction_blocked(fs_info) ||
				 cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}
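
/*
 * Timing sketch (illustrative, assuming the default commit_interval of 30
 * seconds): a running transaction younger than 30s makes the loop above
 * re-check in 5s (delay = HZ * 5); otherwise the thread attaches to the
 * transaction and commits it, then sleeps for a full interval
 * (delay = HZ * 30) unless the transaction state forces an earlier wakeup.
 */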

/*
 * this will find the highest generation in the array of
 * root backups.  The index of the newest entry is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}
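
/*
 * Worked example (illustrative), with BTRFS_NUM_BACKUP_ROOTS == 4 and a
 * super generation of 102: slot generations { 99, 100, 101, 102 } yield
 * newest_index == 3, while { 102, 100, 101, 102 } take the wraparound
 * branch above and yield 0, since slot 0 then holds the most recently
 * written copy of generation 102.
 */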


/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
				       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			       btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			       btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}
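
/*
 * Ring arithmetic example (illustrative): with BTRFS_NUM_BACKUP_ROOTS == 4
 * and backup_root_index == 2, the slot written above is 2 (last_backup is
 * (2 + 4 - 1) % 4 == 1, reused only when the tree root generation is
 * unchanged, e.g. at umount), and backup_root_index then advances to
 * (2 + 1) % 4 == 3 for the next commit.
 */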

/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * FIXME: the total bytes and num_devices need to match, otherwise
	 * we should require a fsck.
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
	return 0;
}
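
/*
 * Illustrative caller pattern (this mirrors the retry_root_backup loop in
 * open_ctree() below; read_backup_roots() here is a hypothetical helper,
 * not a function in this file):
 *
 *	int tried = 0, index = 0;
 *
 *	while (next_root_backup(info, info->super_copy, &tried, &index) == 0) {
 *		if (read_backup_roots(info) == 0)
 *			break;	// mounted from this backup
 *	}
 */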

/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	btrfs_destroy_workqueue(fs_info->endio_workers);
	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
	btrfs_destroy_workqueue(fs_info->rmw_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->submit_workers);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->readahead_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	btrfs_destroy_workqueue(fs_info->extent_workers);
	/*
	 * Now that all other work queues are destroyed, we can safely destroy
	 * the queues used for metadata I/O, since tasks from those other work
	 * queues can do metadata I/O operations.
	 */
	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
}

static void free_root_extent_buffers(struct btrfs_root *root)
{
	if (root) {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		root->node = NULL;
		root->commit_root = NULL;
	}
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_root_extent_buffers(info->tree_root);

	free_root_extent_buffers(info->dev_root);
	free_root_extent_buffers(info->extent_root);
	free_root_extent_buffers(info->csum_root);
	free_root_extent_buffers(info->quota_root);
	free_root_extent_buffers(info->uuid_root);
	if (chunk_root)
		free_root_extent_buffers(info->chunk_root);
	free_root_extent_buffers(info->free_space_root);
}

void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			btrfs_put_fs_root(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log_root_tree(NULL, fs_info);
		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	}
}

static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	refcount_set(&fs_info->scrub_workers_refcnt, 0);
}

static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
}

static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
	struct inode *inode = fs_info->btree_inode;

	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(inode, 1);
	/*
	 * We set the i_size on the btree inode to the max possible offset.
	 * The real end of the address space is determined by all of the
	 * devices in the system.
	 */
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &btree_aops;

	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
			    IO_TREE_INODE_IO, inode);
	BTRFS_I(inode)->io_tree.track_uptodate = false;
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);

	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(inode)->root = fs_info->tree_root;
	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
	btrfs_insert_inode_hash(inode);
}

static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	init_rwsem(&fs_info->dev_replace.rwsem);
	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
}

static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->qgroup_ulist = NULL;
	fs_info->qgroup_rescan_running = false;
	mutex_init(&fs_info->qgroup_rescan_lock);
}

static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
		struct btrfs_fs_devices *fs_devices)
{
	u32 max_active = fs_info->thread_pool_size;
	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;

	fs_info->workers =
		btrfs_alloc_workqueue(fs_info, "worker",
				      flags | WQ_HIGHPRI, max_active, 16);

	fs_info->delalloc_workers =
		btrfs_alloc_workqueue(fs_info, "delalloc",
				      flags, max_active, 2);

	fs_info->flush_workers =
		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
				      flags, max_active, 0);

	fs_info->caching_workers =
		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);

	/*
	 * a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers =
		btrfs_alloc_workqueue(fs_info, "submit", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 64);

	fs_info->fixup_workers =
		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers =
		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
	fs_info->endio_meta_workers =
		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
				      max_active, 4);
	fs_info->endio_meta_write_workers =
		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
				      max_active, 2);
	fs_info->endio_raid56_workers =
		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
				      max_active, 4);
	fs_info->endio_repair_workers =
		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
	fs_info->rmw_workers =
		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
	fs_info->endio_write_workers =
		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
				      max_active, 2);
	fs_info->endio_freespace_worker =
		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
				      max_active, 0);
	fs_info->delayed_workers =
		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
				      max_active, 0);
	fs_info->readahead_workers =
		btrfs_alloc_workqueue(fs_info, "readahead", flags,
				      max_active, 2);
	fs_info->qgroup_rescan_workers =
		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
	fs_info->extent_workers =
		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 8);

	if (!(fs_info->workers && fs_info->delalloc_workers &&
	      fs_info->submit_workers && fs_info->flush_workers &&
	      fs_info->endio_workers && fs_info->endio_meta_workers &&
	      fs_info->endio_meta_write_workers &&
	      fs_info->endio_repair_workers &&
	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
	      fs_info->caching_workers && fs_info->readahead_workers &&
	      fs_info->fixup_workers && fs_info->delayed_workers &&
	      fs_info->extent_workers &&
	      fs_info->qgroup_rescan_workers)) {
		return -ENOMEM;
	}

	return 0;
}
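
/*
 * Parameter sketch (illustrative): the last two arguments of
 * btrfs_alloc_workqueue() above are the active-worker limit and the idle
 * threshold used for dynamic sizing, e.g.
 *
 *	btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
 *
 * gives the endio queue a low threshold of 4 so largely parallel
 * completions are spread across workers quickly, while "submit" uses 64
 * so bios batch up and reach the devices in a saner order.
 */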

static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
			    struct btrfs_fs_devices *fs_devices)
{
	int ret;
	struct btrfs_root *log_tree_root;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	u64 bytenr = btrfs_super_log_root(disk_super);
	int level = btrfs_super_log_root_level(disk_super);

	if (fs_devices->rw_devices == 0) {
		btrfs_warn(fs_info, "log replay required on RO media");
		return -EIO;
	}

	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!log_tree_root)
		return -ENOMEM;

	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	log_tree_root->node = read_tree_block(fs_info, bytenr,
					      fs_info->generation + 1,
					      level, NULL);
	if (IS_ERR(log_tree_root->node)) {
		btrfs_warn(fs_info, "failed to read log tree");
		ret = PTR_ERR(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
		btrfs_err(fs_info, "failed to read log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return -EIO;
	}
	/* returns with log_tree_root freed on success */
	ret = btrfs_recover_log_trees(log_tree_root);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to recover log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	}

	if (sb_rdonly(fs_info->sb)) {
		ret = btrfs_commit_super(fs_info);
		if (ret)
			return ret;
	}

	return 0;
}

static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key location;
	int ret;

	BUG_ON(!fs_info->tree_root);

	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->extent_root = root;

	location.objectid = BTRFS_DEV_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->dev_root = root;
	btrfs_init_devices_late(fs_info);

	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->csum_root = root;

	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (!IS_ERR(root)) {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		fs_info->quota_root = root;
	}

	location.objectid = BTRFS_UUID_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		if (ret != -ENOENT)
			goto out;
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->uuid_root = root;
	}

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
		root = btrfs_read_tree_root(tree_root, &location);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->free_space_root = root;
	}

	return 0;
out:
	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
		   location.objectid, ret);
	return ret;
}
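
/*
 * Note (descriptive): the quota and UUID trees are optional above, which
 * is why a missing quota root is silently skipped and a UUID root lookup
 * only fails the mount for errors other than -ENOENT; the extent, dev and
 * csum trees are mandatory, so any read failure aborts via the out label.
 */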

/*
 * Real super block validation
 * NOTE: super csum type and incompat features will not be checked here.
 *
 * @sb:		super block to check
 * @mirror_num:	the super block number to check its bytenr:
 * 		0	the primary (1st) sb
 * 		1, 2	2nd and 3rd backup copy
 * 	       -1	skip bytenr check
 */
static int validate_super(struct btrfs_fs_info *fs_info,
			    struct btrfs_super_block *sb, int mirror_num)
{
	u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;

	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
		btrfs_err(fs_info, "no valid FS found");
		ret = -EINVAL;
	}
	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
		ret = -EINVAL;
	}
	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "log_root level too big: %d >= %d",
				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * Check sectorsize and nodesize first, other check will need it.
	 * Check all possible sectorsize(4K, 8K, 16K, 32K, 64K) here.
	 */
	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
		ret = -EINVAL;
	}
	/* Only PAGE SIZE is supported yet */
	if (sectorsize != PAGE_SIZE) {
		btrfs_err(fs_info,
			"sectorsize %llu not supported yet, only support %lu",
			sectorsize, PAGE_SIZE);
		ret = -EINVAL;
	}
	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
		ret = -EINVAL;
	}
	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
			  le32_to_cpu(sb->__unused_leafsize), nodesize);
		ret = -EINVAL;
	}

	/* Root alignment check */
	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
			   btrfs_super_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
			   btrfs_super_chunk_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "log_root block unaligned: %llu",
			   btrfs_super_log_root(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
		   BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
			"dev_item UUID does not match metadata fsid: %pU != %pU",
			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
	 * done later
	 */
	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
		btrfs_err(fs_info, "bytes_used is too small %llu",
			  btrfs_super_bytes_used(sb));
		ret = -EINVAL;
	}
	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
		btrfs_err(fs_info, "invalid stripesize %u",
			  btrfs_super_stripesize(sb));
		ret = -EINVAL;
	}
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		btrfs_warn(fs_info, "suspicious number of devices: %llu",
			   btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		btrfs_err(fs_info, "number of devices is 0");
		ret = -EINVAL;
	}

	if (mirror_num >= 0 &&
	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
		btrfs_err(fs_info, "super offset mismatch %llu != %u",
			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruptions, it must hold at least one key
	 * and one chunk
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		btrfs_err(fs_info, "system chunk array too big %u > %u",
			  btrfs_super_sys_array_size(sb),
			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		btrfs_err(fs_info, "system chunk array too small %u < %zu",
			  btrfs_super_sys_array_size(sb),
			  sizeof(struct btrfs_disk_key)
			  + sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter, we'll trust it more than the others
	 * but it's still possible that it's the one that's wrong.
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		btrfs_warn(fs_info,
			"suspicious: generation < chunk_root_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		btrfs_warn(fs_info,
			"suspicious: generation < cache_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_cache_generation(sb));

	return ret;
}
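
/*
 * Concrete failure examples for the checks above (illustrative): a
 * nodesize of 12288 (3 * 4K) fails is_power_of_2(); a nodesize of 2048 on
 * a 4K-sectorsize fs fails the nodesize < sectorsize test; and a
 * tree_root bytenr of 36865 fails IS_ALIGNED(..., 4096).
 */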

/*
 * Validation of super block at mount time.
 * Some checks already done early at mount time, like csum type and incompat
 * flags will be skipped.
 */
static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
{
	return validate_super(fs_info, fs_info->super_copy, 0);
}

/*
 * Validation of super block at write time.
 * Some checks like bytenr check will be skipped as their values will be
 * overwritten soon.
 * Extra checks like csum type and incompat flags will be done here.
 */
static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
				      struct btrfs_super_block *sb)
{
	int ret;

	ret = validate_super(fs_info, sb, -1);
	if (ret < 0)
		goto out;
	if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) {
		ret = -EUCLEAN;
		btrfs_err(fs_info, "invalid csum type, has %u want %u",
			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
		goto out;
	}
	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
			  btrfs_super_incompat_flags(sb),
			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
		goto out;
	}
out:
	if (ret < 0)
		btrfs_err(fs_info,
		"super block corruption detected before writing it to disk");
	return ret;
}

int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 stripesize;
	u64 generation;
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	int ret;
	int err = -EINVAL;
	int num_backups_tried = 0;
	int backup_index = 0;
	int clear_free_space_tree = 0;
	int level;

	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!tree_root || !chunk_root) {
		err = -ENOMEM;
		goto fail;
	}

	ret = init_srcu_struct(&fs_info->subvol_srcu);
	if (ret) {
		err = ret;
		goto fail;
	}

	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_srcu;
	}
	fs_info->dirty_metadata_batch = PAGE_SIZE *
					(1 + ilog2(nr_cpu_ids));

	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_dirty_metadata_bytes;
	}

	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
			GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_delalloc_bytes;
	}

	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->delalloc_roots);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
	spin_lock_init(&fs_info->delalloc_root_lock);
	spin_lock_init(&fs_info->trans_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->unused_bgs_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	mutex_init(&fs_info->unused_bg_unpin_mutex);
	mutex_init(&fs_info->delete_unused_bgs_mutex);
	mutex_init(&fs_info->reloc_mutex);
	mutex_init(&fs_info->delalloc_root_mutex);
	seqlock_init(&fs_info->profiles_lock);

	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_LIST_HEAD(&fs_info->unused_bgs);
	btrfs_mapping_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv,
			     BTRFS_BLOCK_RSV_GLOBAL);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
			     BTRFS_BLOCK_RSV_DELOPS);
	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
			     BTRFS_BLOCK_RSV_DELREFS);

	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->defrag_running, 0);
	atomic_set(&fs_info->reada_works_cnt, 0);
	atomic_set(&fs_info->nr_delayed_iputs, 0);
	atomic64_set(&fs_info->tree_mod_seq, 0);
	fs_info->sb = sb;
	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	atomic64_set(&fs_info->free_chunk_space, 0);
	fs_info->tree_mod_log = RB_ROOT;
	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
	/* readahead state */
	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	spin_lock_init(&fs_info->reada_lock);
	btrfs_init_ref_verify(fs_info);

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_roots);
	spin_lock_init(&fs_info->ordered_root_lock);

	fs_info->btree_inode = new_inode(sb);
	if (!fs_info->btree_inode) {
		err = -ENOMEM;
		goto fail_bio_counter;
	}
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
					GFP_KERNEL);
	if (!fs_info->delayed_root) {
		err = -ENOMEM;
		goto fail_iput;
	}
	btrfs_init_delayed_root(fs_info->delayed_root);

	btrfs_init_scrub(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	fs_info->check_integrity_print_mask = 0;
#endif
	btrfs_init_balance(fs_info);
	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);

	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);

	btrfs_init_btree_inode(fs_info);

	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT;
	fs_info->first_logical_byte = (u64)-1;

	extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
			    IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
	extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
			    IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);

	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->ro_block_group_mutex);
	init_rwsem(&fs_info->commit_root_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);
	sema_init(&fs_info->uuid_tree_rescan_sem, 1);

	btrfs_init_dev_replace_locks(fs_info);
	btrfs_init_qgroup(fs_info);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->transaction_blocked_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);
	init_waitqueue_head(&fs_info->delayed_iputs_wait);

	/* Usable values until the real ones are cached from the superblock */
	fs_info->nodesize = 4096;
	fs_info->sectorsize = 4096;
	fs_info->stripesize = 4096;

	spin_lock_init(&fs_info->swapfile_pins_lock);
	fs_info->swapfile_pins = RB_ROOT;

	ret = btrfs_alloc_stripe_hash_table(fs_info);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);

	invalidate_bdev(fs_devices->latest_bdev);

	/*
	 * Read super block and check the signature bytes only
	 */
	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (IS_ERR(bh)) {
		err = PTR_ERR(bh);
		goto fail_alloc;
	}

	/*
	 * We want to check superblock checksum, the type is stored inside.
	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
	 */
	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
		btrfs_err(fs_info, "superblock checksum mismatch");
		err = -EINVAL;
		brelse(bh);
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE, the checksum is calculated from
	 * the whole block of INFO_SIZE
	 */
	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
	brelse(bh);

	disk_super = fs_info->super_copy;

	ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
		       BTRFS_FSID_SIZE));

	if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
		ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
				fs_info->super_copy->metadata_uuid,
				BTRFS_FSID_SIZE));
	}

	features = btrfs_super_flags(disk_super);
	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
		btrfs_set_super_flags(disk_super, features);
		btrfs_info(fs_info,
			"found metadata UUID change in progress flag, clearing");
	}

	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));

	ret = btrfs_validate_mount_super(fs_info);
	if (ret) {
		btrfs_err(fs_info, "superblock contains fatal errors");
		err = -EINVAL;
		goto fail_alloc;
	}

	if (!btrfs_super_root(disk_super))
		goto fail_alloc;

	/* check FS state, whether FS is broken. */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);

	/*
	 * run through our array of backup supers and setup
	 * our ring pointer to the oldest one
	 */
	generation = btrfs_super_generation(disk_super);
	find_oldest_super_backup(fs_info, generation);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		btrfs_err(fs_info,
		    "cannot mount because of unsupported optional features (%llx)",
		    features);
		err = -EINVAL;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;

	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
		btrfs_info(fs_info, "has skinny extents");

	/*
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			btrfs_info(fs_info,
				"flagging fs with big metadata feature");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}

	nodesize = btrfs_super_nodesize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = sectorsize;
	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	/* Cache block sizes */
	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;
	fs_info->stripesize = stripesize;

	/*
	 * mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range.  It leads to corruptions
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != nodesize)) {
		btrfs_err(fs_info,
"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
			nodesize, sectorsize);
		goto fail_alloc;
	}

	/*
	 * Needn't use the lock because there is no other task which will
	 * update the flag.
	 */
	btrfs_set_super_incompat_flags(disk_super, features);

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!sb_rdonly(sb) && features) {
		btrfs_err(fs_info,
	"cannot mount read-write because of unsupported optional features (%llx)",
		       features);
		err = -EINVAL;
		goto fail_alloc;
	}

	ret = btrfs_init_workqueues(fs_info, fs_devices);
	if (ret) {
		err = ret;
		goto fail_sb_buffer;
	}

	sb->s_bdi->congested_fn = btrfs_congested_fn;
	sb->s_bdi->congested_data = fs_info;
	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);
	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(fs_info);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		btrfs_err(fs_info, "failed to read the system array: %d", ret);
		goto fail_sb_buffer;
	}

	generation = btrfs_super_chunk_root_generation(disk_super);
	level = btrfs_super_chunk_root_level(disk_super);

	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(fs_info,
					   btrfs_super_chunk_root(disk_super),
					   generation, level, NULL);
	if (IS_ERR(chunk_root->node) ||
	    !extent_buffer_uptodate(chunk_root->node)) {
		btrfs_err(fs_info, "failed to read chunk root");
		if (!IS_ERR(chunk_root->node))
			free_extent_buffer(chunk_root->node);
		chunk_root->node = NULL;
		goto fail_tree_roots;
	}
	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
	chunk_root->commit_root = btrfs_root_node(chunk_root);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
		goto fail_tree_roots;
	}

	/*
	 * Keep the devid that is marked to be the target device for the
	 * device replace procedure
	 */
	btrfs_free_extra_devids(fs_devices, 0);

	if (!fs_devices->latest_bdev) {
		btrfs_err(fs_info, "failed to read devices");
		goto fail_tree_roots;
	}

retry_root_backup:
	generation = btrfs_super_generation(disk_super);
	level = btrfs_super_root_level(disk_super);

	tree_root->node = read_tree_block(fs_info,
					  btrfs_super_root(disk_super),
					  generation, level, NULL);
	if (IS_ERR(tree_root->node) ||
	    !extent_buffer_uptodate(tree_root->node)) {
		btrfs_warn(fs_info, "failed to read tree root");
		if (!IS_ERR(tree_root->node))
			free_extent_buffer(tree_root->node);
		tree_root->node = NULL;
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);
	btrfs_set_root_refs(&tree_root->root_item, 1);

	mutex_lock(&tree_root->objectid_mutex);
	ret = btrfs_find_highest_objectid(tree_root,
					&tree_root->highest_objectid);
	if (ret) {
		mutex_unlock(&tree_root->objectid_mutex);
		goto recovery_tree_root;
	}

	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&tree_root->objectid_mutex);

	ret = btrfs_read_roots(fs_info);
	if (ret)
		goto recovery_tree_root;

	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_verify_dev_extents(fs_info);
	if (ret) {
		btrfs_err(fs_info,
			  "failed to verify dev extents against chunks: %d",
			  ret);
		goto fail_block_groups;
	}
	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to recover balance: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
		goto fail_block_groups;
	}

	btrfs_free_extra_devids(fs_devices, 1);

	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
				ret);
		goto fail_block_groups;
	}

	ret = btrfs_sysfs_add_device(fs_devices);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
				ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_sysfs_add_mounted(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
		goto fail_sysfs;
	}

	ret = btrfs_read_block_groups(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to read block groups: %d", ret);
		goto fail_sysfs;
	}

	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
		btrfs_warn(fs_info,
		"writable mount is not allowed due to too many missing devices");
		goto fail_sysfs;
	}

	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_sysfs;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	if (!btrfs_test_opt(fs_info, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
	}

	/*
	 * Mount does not set all options immediately, we can do it now and do
	 * not have to wait for transaction commit
	 */
	btrfs_apply_pending_changes(fs_info);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(fs_info, fs_devices,
				    btrfs_test_opt(fs_info,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			btrfs_warn(fs_info,
				"failed to initialize integrity check module: %d",
				ret);
	}
#endif
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;

	if (btrfs_build_ref_tree(fs_info))
		btrfs_err(fs_info, "couldn't build ref tree");

	/* do not make disk changes in broken FS or nologreplay is given */
	if (btrfs_super_log_root(disk_super) != 0 &&
	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
		ret = btrfs_replay_log(fs_info, fs_devices);
		if (ret) {
			err = ret;
			goto fail_qgroup;
		}
	}

	ret = btrfs_find_orphan_roots(fs_info);
	if (ret)
		goto fail_qgroup;

	if (!sb_rdonly(sb)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_qgroup;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = btrfs_recover_relocation(tree_root);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			btrfs_warn(fs_info, "failed to recover relocation: %d",
					ret);
			err = -EINVAL;
			goto fail_qgroup;
		}
	}

	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
		goto fail_qgroup;
	}

	if (sb_rdonly(sb))
		return 0;

	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		clear_free_space_tree = 1;
	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
		btrfs_warn(fs_info, "free space tree is invalid");
		clear_free_space_tree = 1;
	}

	if (clear_free_space_tree) {
		btrfs_info(fs_info, "clearing free space tree");
		ret = btrfs_clear_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to clear free space tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	}

	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		btrfs_info(fs_info, "creating free space tree");
		ret = btrfs_create_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				"failed to create free space tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	}

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(fs_info);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
		close_ctree(fs_info);
		return ret;
	}

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
		close_ctree(fs_info);
		return ret;
	}

	btrfs_qgroup_rescan_resume(fs_info);

	if (!fs_info->uuid_root) {
		btrfs_info(fs_info, "creating UUID tree");
		ret = btrfs_create_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				"failed to create the UUID tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
		   fs_info->generation !=
				btrfs_super_uuid_tree_generation(disk_super)) {
		btrfs_info(fs_info, "checking UUID tree");
		ret = btrfs_check_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				"failed to check the UUID tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	} else {
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	}
	set_bit(BTRFS_FS_OPEN, &fs_info->flags);

	/*
	 * backuproot only affects mount behavior, and if open_ctree succeeded,
	 * no need to keep the flag
	 */
	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);

	return 0;

fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	btrfs_cleanup_transaction(fs_info);
	btrfs_free_fs_roots(fs_info);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_sysfs:
	btrfs_sysfs_remove_mounted(fs_info);

fail_fsdev_sysfs:
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
	btrfs_free_block_groups(fs_info);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail_bio_counter:
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
fail_delalloc_bytes:
	percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_close_devices(fs_info->fs_devices);
	return err;

recovery_tree_root:
	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);

	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}
3359
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3360
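
/*
 * ALLOW_ERROR_INJECTION() (<linux/error-injection.h>) marks open_ctree()
 * as a valid target for the kernel's function error injection framework,
 * so fault-injection tests can force the mount path to fail with an errno.
 */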

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		btrfs_warn_rl_in_rcu(device->fs_info,
				"lost page write due to IO error on %s",
				rcu_str_deref(device->name));
		/*
		 * note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
			struct buffer_head **bh_ret)
{
	struct buffer_head *bh;
	struct btrfs_super_block *super;
	u64 bytenr;

	bytenr = btrfs_sb_offset(copy_num);
	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
		return -EINVAL;

	bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
	/*
	 * If we fail to read from the underlying devices, as of now
	 * the best option we have is to mark it EIO.
	 */
	if (!bh)
		return -EIO;

	super = (struct btrfs_super_block *)bh->b_data;
	if (btrfs_super_bytenr(super) != bytenr ||
		    btrfs_super_magic(super) != BTRFS_MAGIC) {
		brelse(bh);
		return -EINVAL;
	}

	*bh_ret = bh;
	return 0;
}
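
/*
 * Usage sketch (this mirrors what btrfs_read_dev_super() does below):
 * read one specific super block copy, inspect it, then drop the buffer
 * head, which the caller owns on success:
 *
 *	struct buffer_head *bh;
 *	struct btrfs_super_block *super;
 *	u64 gen;
 *
 *	if (!btrfs_read_dev_one_super(bdev, 0, &bh)) {
 *		super = (struct btrfs_super_block *)bh->b_data;
 *		gen = btrfs_super_generation(super);
 *		brelse(bh);
 *	}
 */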


struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	int ret = -EINVAL;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	for (i = 0; i < 1; i++) {
		ret = btrfs_read_dev_one_super(bdev, i, &bh);
		if (ret)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}

	if (!latest)
		return ERR_PTR(ret);

	return latest;
}
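
/*
 * For reference (btrfs on-disk format): btrfs_sb_offset() places the
 * primary super block at 64KiB and the mirror copies at 64MiB and 256GiB,
 * for up to BTRFS_SUPER_MIRROR_MAX copies in total.
 */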

/*
 * Write superblock @sb to the @device. Do not wait for completion, all the
 * buffer heads we write are pinned.
 *
 * Write @max_mirrors copies of the superblock, where 0 means the default:
 * all copies that fit the expected device size at commit time. Note that
 * max_mirrors must be the same for the write and wait phases.
 *
 * Return number of errors when buffer head is not found or submission fails.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;
	int op_flags;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		btrfs_set_super_bytenr(sb, bytenr);

		crc = ~(u32)0;
		crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);

		/* One reference for us, and we leave it for the caller */
		bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
			      BTRFS_SUPER_INFO_SIZE);
		if (!bh) {
			btrfs_err(device->fs_info,
			    "couldn't get super buffer head for bytenr %llu",
			    bytenr);
			errors++;
			continue;
		}

		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

		/* one reference for submit_bh */
		get_bh(bh);

		set_buffer_uptodate(bh);
		lock_buffer(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;
		bh->b_private = device;

		/*
		 * We FUA the first super.  The others we allow to go down
		 * lazily.
		 */
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
			op_flags |= REQ_FUA;
		ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}

/*
 * Wait for write completion of superblocks done by write_dev_supers,
 * @max_mirrors must be the same as passed to write_dev_supers().
 *
 * Return number of errors when buffer head is not found or not marked up to
 * date.
 */
static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int errors = 0;
	bool primary_failed = false;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		bh = __find_get_block(device->bdev,
				      bytenr / BTRFS_BDEV_BLOCKSIZE,
				      BTRFS_SUPER_INFO_SIZE);
		if (!bh) {
			errors++;
			if (i == 0)
				primary_failed = true;
			continue;
		}
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			errors++;
			if (i == 0)
				primary_failed = true;
		}

		/* drop our reference */
		brelse(bh);

		/* drop the reference from the writing run */
		brelse(bh);
	}

	/* log error, force error return */
	if (primary_failed) {
		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
			  device->devid);
		return -1;
	}

	return errors < i ? 0 : -1;
}
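
/*
 * The two halves above pair up as in write_all_supers() below: the write
 * phase is submitted to every writeable device first, then the wait phase
 * collects the results with the same max_mirrors, keeping the submissions
 * to all devices in flight concurrently.
 */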

/*
 * Endio for write_dev_flush; this wakes anyone waiting for the barrier
 * when it is done.
 */
static void btrfs_end_empty_barrier(struct bio *bio)
{
	complete(bio->bi_private);
}

/*
 * Submit a flush request to the device if it supports it. Error handling is
 * done in the waiting counterpart.
 */
static void write_dev_flush(struct btrfs_device *device)
{
	struct request_queue *q = bdev_get_queue(device->bdev);
	struct bio *bio = device->flush_bio;

	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return;

	bio_reset(bio);
	bio->bi_end_io = btrfs_end_empty_barrier;
	bio_set_dev(bio, device->bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;

	btrfsic_submit_bio(bio);
	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}

/*
 * If the flush bio has been submitted by write_dev_flush, wait for it.
 */
static blk_status_t wait_dev_flush(struct btrfs_device *device)
{
	struct bio *bio = device->flush_bio;

	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
		return BLK_STS_OK;

	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
	wait_for_completion_io(&device->flush_wait);

	return bio->bi_status;
}

static int check_barrier_error(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_check_rw_degradable(fs_info, NULL))
		return -EIO;
	return 0;
}
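
/*
 * write_dev_flush()/wait_dev_flush() follow the same submit-then-wait
 * split as the super block writes: an empty REQ_PREFLUSH bio asks the
 * device to flush its volatile write cache, and the status is collected
 * in barrier_all_devices() below before any super blocks are written.
 */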

/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_wait = 0;
	blk_status_t ret;

	lockdep_assert_held(&info->fs_devices->device_list_mutex);
	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		write_dev_flush(dev);
		dev->last_flush_error = BLK_STS_OK;
	}

	/* wait for all the barriers */
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_flush(dev);
		if (ret) {
			dev->last_flush_error = ret;
			btrfs_dev_stat_inc_and_print(dev,
					BTRFS_DEV_STAT_FLUSH_ERRS);
			errors_wait++;
		}
	}

	if (errors_wait) {
		/*
		 * At some point we need the status of all disks
		 * to arrive at the volume status. So error checking
		 * is being pushed to a separate loop.
		 */
		return check_barrier_error(info);
	}
	return 0;
}

int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
{
	int raid_type;
	int min_tolerated = INT_MAX;

	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[BTRFS_RAID_SINGLE].
				    tolerated_failures);

	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (raid_type == BTRFS_RAID_SINGLE)
			continue;
		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
			continue;
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[raid_type].
				    tolerated_failures);
	}

	if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu", flags);
		min_tolerated = 0;
	}

	return min_tolerated;
}
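
/*
 * Example: for flags containing BTRFS_BLOCK_GROUP_RAID1 this returns 1
 * (one device may be lost), for RAID6 it returns 2, and for SINGLE or
 * RAID0 it returns 0, per the tolerated_failures values in
 * btrfs_raid_array.
 */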

int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);

	/*
	 * max_mirrors == 0 indicates we're from commit_transaction,
	 * not from fsync where the tree roots in fs_info have not
	 * been consistent on disk.
	 */
	if (max_mirrors == 0)
		backup_super_roots(fs_info);

	sb = fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	head = &fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(fs_info);
		if (ret) {
			mutex_unlock(
				&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, ret,
					      "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
		       BTRFS_FSID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = btrfs_validate_write_super(fs_info, sb);
		if (ret < 0) {
			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, -EUCLEAN,
				"unexpected superblock corruption detected");
			return -EUCLEAN;
		}

		ret = write_dev_supers(dev, sb, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		btrfs_err(fs_info, "%d errors while writing supers",
			  total_errors);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_supers(dev, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}
	return 0;
}
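
/*
 * Note: max_errors above is num_devices - 1, i.e. the super block write
 * phase only fails outright when every device errored; barrier failures
 * are instead checked against the RAID profile via check_barrier_error().
 */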

/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				  struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log(NULL, root);
		if (root->reloc_root) {
			free_extent_buffer(root->reloc_root->node);
			free_extent_buffer(root->reloc_root->commit_root);
			btrfs_put_fs_root(root->reloc_root);
			root->reloc_root = NULL;
		}
	}

	if (root->free_ino_pinned)
		__btrfs_remove_free_space_cache(root->free_ino_pinned);
	if (root->free_ino_ctl)
		__btrfs_remove_free_space_cache(root->free_ino_ctl);
	btrfs_free_fs_root(root);
}

void btrfs_free_fs_root(struct btrfs_root *root)
{
	iput(root->ino_cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	if (root->subv_writers)
		btrfs_free_subvolume_writers(root->subv_writers);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	btrfs_put_fs_root(root);
}

int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;
	int index;

	while (1) {
		index = srcu_read_lock(&fs_info->subvol_srcu);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* grab all the search results for later use */
			gang[i] = btrfs_grab_fs_root(gang[i]);
		}
		srcu_read_unlock(&fs_info->subvol_srcu, index);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				break;
			btrfs_put_fs_root(gang[i]);
		}
		root_objectid++;
	}

	/* release the uncleaned roots due to error */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_fs_root(gang[i]);
	}
	return err;
}
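
/*
 * The gang lookup above scans the radix tree in batches of eight roots,
 * restarting past the highest objectid seen, and drops the SRCU read lock
 * before calling btrfs_orphan_cleanup() so the cleanup itself never runs
 * under SRCU.
 */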

int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);
	wake_up_process(fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work is done */
	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}

void close_ctree(struct btrfs_fs_info *fs_info)
{
	int ret;

	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
	/*
	 * We don't want the cleaner to start new transactions, add more delayed
	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
	 * because that frees the task_struct, and the transaction kthread might
	 * still try to wake up the cleaner.
	 */
	kthread_park(fs_info->cleaner_kthread);

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);

	if (!sb_rdonly(fs_info->sb)) {
		/*
		 * The cleaner kthread is stopped, so do one final pass over
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		ret = btrfs_commit_super(fs_info);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
		btrfs_error_commit_super(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	ASSERT(list_empty(&fs_info->delayed_iputs));
	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);

	btrfs_free_qgroup_config(fs_info);
	ASSERT(list_empty(&fs_info->delalloc_roots));

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
		       percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_free_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	/*
	 * we must make sure there is no read request to submit after we
	 * stop all workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	btrfs_free_block_groups(fs_info);

	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
		btrfsic_unmount(fs_info->fs_devices);
#endif

	btrfs_mapping_tree_free(&fs_info->mapping_tree);
	btrfs_close_devices(fs_info->fs_devices);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	fs_info = root->fs_info;
	btrfs_assert_tree_locked(buf);
	if (transid != fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
			buf->start, transid, fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 buf->len,
					 fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	/*
	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
	 * but the item data not yet updated, so only check item pointers
	 * here, not item data.
	 */
	if (btrfs_header_level(buf) == 0 &&
	    btrfs_check_leaf_relaxed(buf)) {
		btrfs_print_leaf(buf);
		ASSERT(0);
	}
#endif
}

static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);

	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH,
				     fs_info->dirty_metadata_batch);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 0);
}
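
/*
 * The _nodelay variant skips balancing delayed items first and goes
 * straight to the dirty-metadata throttle; btrfs_btree_balance_dirty()
 * flushes delayed items before throttling.
 */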

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
		      struct btrfs_key *first_key)
{
	return btree_read_extent_buffer_pages(buf, parent_transid,
					      level, first_key);
}

static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(fs_info);

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);

	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}

static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);

	/*
	 * We need this here because if we've been flipped read-only we won't
	 * get sync() from the umount, so we need to make sure any ordered
	 * extents that haven't started writeout of their dirty pages yet
	 * actually get run and error out properly.
	 */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}

static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (btrfs_delayed_ref_lock(delayed_refs, head))
			continue;

		spin_lock(&head->lock);
		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
			ref = rb_entry(n, struct btrfs_delayed_ref_node,
				       ref_node);
			ref->in_tree = 0;
			rb_erase_cached(&ref->ref_node, &head->ref_tree);
			RB_CLEAR_NODE(&ref->ref_node);
			if (!list_empty(&ref->add_list))
				list_del(&ref->add_list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		btrfs_delete_ref_head(delayed_refs, head);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes)
			btrfs_pin_extent(fs_info, head->bytenr,
					 head->num_bytes, 1);
		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}
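
/*
 * Note: a head with must_insert_reserved set still owns a reserved extent
 * on disk, which is why the range is pinned above instead of being leaked;
 * the pinned range is then handled by the pinned-extent cleanup during
 * teardown.
 */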

static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		struct inode *inode = NULL;
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);
		__btrfs_del_delalloc_inode(root, btrfs_inode);
		spin_unlock(&root->delalloc_lock);

		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
		inode = igrab(&btrfs_inode->vfs_inode);
		if (inode) {
			invalidate_inode_pages2(inode->i_mapping);
			iput(inode);
		}
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					 delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}

static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}

static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		struct extent_state *cached_state = NULL;

		/*
		 * btrfs_finish_extent_commit() may get the same range as
		 * ours between find_first_extent_bit and clear_extent_dirty.
		 * Hence, hold the unused_bg_unpin_mutex to avoid double
		 * unpinning the same extent range.
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, &cached_state);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		clear_extent_dirty(unpin, start, end, &cached_state);
		free_extent_state(cached_state);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	if (loop) {
		if (unpin == &fs_info->freed_extents[0])
			unpin = &fs_info->freed_extents[1];
		else
			unpin = &fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}
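
/*
 * fs_info->freed_extents[] holds the two pinned-extent trees that swap
 * roles between transactions; the loop above runs once for each tree so
 * both end up empty regardless of which one was active at abort time.
 */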

static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	btrfs_put_block_group(cache);
}

void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of io_bgs member for details why it's safe
	 * to use it without any locking
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group_cache,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}

void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev, *tmp;

	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&dev->post_commit_list);
	}

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info,
				    fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}
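
/*
 * The state transitions above mirror a normal commit
 * (COMMIT_START -> UNBLOCKED -> COMPLETED) so that anything blocked on
 * transaction_blocked_wait, transaction_wait or commit_wait is woken even
 * though the transaction is torn down instead of committed.
 */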

static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info->tree_root);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

static const struct extent_io_ops btree_extent_io_ops = {
	/* mandatory callbacks */
	.submit_bio_hook = btree_submit_bio_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
};