// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Qu Wenruo 2017.  All rights reserved.
 */

/*
 * The module is used to catch unexpected/corrupted tree block data.
 * Such behavior can be caused either by a fuzzed image or bugs.
 *
 * The objective is to do leaf/node validation checks when tree block is read
 * from disk, and check *every* possible member, so other code won't
 * need to check them again.
 *
 * Due to the potential and unwanted damage, every checker needs to be
 * carefully reviewed so that it does not prevent mount of valid images.
 */

#include "ctree.h"
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"

/*
 * Error message should follow the following format:
 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
 * 		It's recommended to decode key.objectid/offset if it's
 * 		meaningful.
 * @reason:	describe the error
 * @bad_value:	optional, it's recommended to output bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only space is allowed
 * inside each component.
 */

/*
 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
 * Allows callers to customize the output.
 */
44
__printf(3, 4)
45
__cold
46
static void generic_err(const struct extent_buffer *eb, int slot,
47 48
			const char *fmt, ...)
{
49
	const struct btrfs_fs_info *fs_info = eb->fs_info;
50 51 52 53 54 55 56 57
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

58
	btrfs_crit(fs_info,
59 60
		"corrupt %s: root=%llu block=%llu slot=%d, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
61
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
62 63 64
	va_end(args);
}

65 66 67 68
/*
 * Customized reporter for extent data item, since its key objectid and
 * offset has its own meaning.
 */
69
__printf(3, 4)
70
__cold
71
static void file_extent_err(const struct extent_buffer *eb, int slot,
72 73
			    const char *fmt, ...)
{
74
	const struct btrfs_fs_info *fs_info = eb->fs_info;
75 76 77 78 79 80 81 82 83 84
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

85
	btrfs_crit(fs_info,
86
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
87 88 89
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
90 91 92 93 94 95 96
	va_end(args);
}

/*
 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment
 * Else return 1 (and report the misalignment via file_extent_err()).
 */
#define CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, name, alignment)	      \
({									      \
	if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
		file_extent_err((leaf), (slot),				      \
	"invalid %s for file extent, have %llu, should be aligned to %u",     \
			(#name), btrfs_file_extent_##name((leaf), (fi)),      \
			(alignment));					      \
	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));   \
})

107
static int check_extent_data_item(struct extent_buffer *leaf,
108 109
				  struct btrfs_key *key, int slot)
{
110
	struct btrfs_fs_info *fs_info = leaf->fs_info;
111
	struct btrfs_file_extent_item *fi;
112
	u32 sectorsize = fs_info->sectorsize;
113 114 115
	u32 item_size = btrfs_item_size_nr(leaf, slot);

	if (!IS_ALIGNED(key->offset, sectorsize)) {
116
		file_extent_err(leaf, slot,
117 118
"unaligned file_offset for file extent, have %llu should be aligned to %u",
			key->offset, sectorsize);
119 120 121 122 123 124
		return -EUCLEAN;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
125
		file_extent_err(leaf, slot,
126 127 128
		"invalid type for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_type(leaf, fi),
			BTRFS_FILE_EXTENT_TYPES);
129 130 131 132
		return -EUCLEAN;
	}

	/*
133
	 * Support for new compression/encryption must introduce incompat flag,
134 135 136
	 * and must be caught in open_ctree().
	 */
	if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
137
		file_extent_err(leaf, slot,
138 139 140
	"invalid compression for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_compression(leaf, fi),
			BTRFS_COMPRESS_TYPES);
141 142 143
		return -EUCLEAN;
	}
	if (btrfs_file_extent_encryption(leaf, fi)) {
144
		file_extent_err(leaf, slot,
145 146
			"invalid encryption for file extent, have %u expect 0",
			btrfs_file_extent_encryption(leaf, fi));
147 148 149 150 151
		return -EUCLEAN;
	}
	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		/* Inline extent must have 0 as key offset */
		if (key->offset) {
152
			file_extent_err(leaf, slot,
153 154
		"invalid file_offset for inline file extent, have %llu expect 0",
				key->offset);
155 156 157 158 159 160 161 162 163 164 165
			return -EUCLEAN;
		}

		/* Compressed inline extent has no on-disk size, skip it */
		if (btrfs_file_extent_compression(leaf, fi) !=
		    BTRFS_COMPRESS_NONE)
			return 0;

		/* Uncompressed inline extent size must match item size */
		if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
		    btrfs_file_extent_ram_bytes(leaf, fi)) {
166
			file_extent_err(leaf, slot,
167 168 169
	"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
				btrfs_file_extent_ram_bytes(leaf, fi));
170 171 172 173 174 175 176
			return -EUCLEAN;
		}
		return 0;
	}

	/* Regular or preallocated extent has fixed item size */
	if (item_size != sizeof(*fi)) {
177
		file_extent_err(leaf, slot,
178
	"invalid item size for reg/prealloc file extent, have %u expect %zu",
179
			item_size, sizeof(*fi));
180 181
		return -EUCLEAN;
	}
182 183 184 185 186
	if (CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, ram_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_bytenr, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_num_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, offset, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, num_bytes, sectorsize))
187 188 189 190
		return -EUCLEAN;
	return 0;
}

191
static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
192
			   int slot)
193
{
194
	struct btrfs_fs_info *fs_info = leaf->fs_info;
195 196
	u32 sectorsize = fs_info->sectorsize;
	u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);
197 198

	if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
199
		generic_err(leaf, slot,
200 201
		"invalid key objectid for csum item, have %llu expect %llu",
			key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
202 203 204
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->offset, sectorsize)) {
205
		generic_err(leaf, slot,
206 207
	"unaligned key offset for csum item, have %llu should be aligned to %u",
			key->offset, sectorsize);
208 209 210
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
211
		generic_err(leaf, slot,
212 213
	"unaligned item size for csum item, have %u should be aligned to %u",
			btrfs_item_size_nr(leaf, slot), csumsize);
214 215 216 217 218
		return -EUCLEAN;
	}
	return 0;
}

219 220 221 222
/*
 * Customized reported for dir_item, only important new info is key->objectid,
 * which represents inode number
 */
223
__printf(3, 4)
224
__cold
225
static void dir_item_err(const struct extent_buffer *eb, int slot,
226 227
			 const char *fmt, ...)
{
228
	const struct btrfs_fs_info *fs_info = eb->fs_info;
229 230 231 232 233 234 235 236 237 238
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

239
	btrfs_crit(fs_info,
240
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
241 242 243
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
244 245 246
	va_end(args);
}

247
static int check_dir_item(struct extent_buffer *leaf,
248 249
			  struct btrfs_key *key, int slot)
{
250
	struct btrfs_fs_info *fs_info = leaf->fs_info;
251 252 253 254 255 256 257 258 259 260 261 262 263 264 265
	struct btrfs_dir_item *di;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u32 cur = 0;

	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
	while (cur < item_size) {
		u32 name_len;
		u32 data_len;
		u32 max_name_len;
		u32 total_size;
		u32 name_hash;
		u8 dir_type;

		/* header itself should not cross item boundary */
		if (cur + sizeof(*di) > item_size) {
266
			dir_item_err(leaf, slot,
267
		"dir item header crosses item boundary, have %zu boundary %u",
268 269 270 271 272 273 274
				cur + sizeof(*di), item_size);
			return -EUCLEAN;
		}

		/* dir type check */
		dir_type = btrfs_dir_type(leaf, di);
		if (dir_type >= BTRFS_FT_MAX) {
275
			dir_item_err(leaf, slot,
276 277 278 279 280 281 282
			"invalid dir item type, have %u expect [0, %u)",
				dir_type, BTRFS_FT_MAX);
			return -EUCLEAN;
		}

		if (key->type == BTRFS_XATTR_ITEM_KEY &&
		    dir_type != BTRFS_FT_XATTR) {
283
			dir_item_err(leaf, slot,
284 285 286 287 288 289
		"invalid dir item type for XATTR key, have %u expect %u",
				dir_type, BTRFS_FT_XATTR);
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR &&
		    key->type != BTRFS_XATTR_ITEM_KEY) {
290
			dir_item_err(leaf, slot,
291 292 293 294 295 296 297 298 299 300 301 302
			"xattr dir type found for non-XATTR key");
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR)
			max_name_len = XATTR_NAME_MAX;
		else
			max_name_len = BTRFS_NAME_LEN;

		/* Name/data length check */
		name_len = btrfs_dir_name_len(leaf, di);
		data_len = btrfs_dir_data_len(leaf, di);
		if (name_len > max_name_len) {
303
			dir_item_err(leaf, slot,
304 305 306 307
			"dir item name len too long, have %u max %u",
				name_len, max_name_len);
			return -EUCLEAN;
		}
308
		if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
309
			dir_item_err(leaf, slot,
310 311
			"dir item name and data len too long, have %u max %u",
				name_len + data_len,
312
				BTRFS_MAX_XATTR_SIZE(fs_info));
313 314 315 316
			return -EUCLEAN;
		}

		if (data_len && dir_type != BTRFS_FT_XATTR) {
317
			dir_item_err(leaf, slot,
318 319 320 321 322 323 324 325 326
			"dir item with invalid data len, have %u expect 0",
				data_len);
			return -EUCLEAN;
		}

		total_size = sizeof(*di) + name_len + data_len;

		/* header and name/data should not cross item boundary */
		if (cur + total_size > item_size) {
327
			dir_item_err(leaf, slot,
328 329 330 331 332 333 334 335 336 337 338
		"dir item data crosses item boundary, have %u boundary %u",
				cur + total_size, item_size);
			return -EUCLEAN;
		}

		/*
		 * Special check for XATTR/DIR_ITEM, as key->offset is name
		 * hash, should match its name
		 */
		if (key->type == BTRFS_DIR_ITEM_KEY ||
		    key->type == BTRFS_XATTR_ITEM_KEY) {
339 340
			char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];

341 342 343 344
			read_extent_buffer(leaf, namebuf,
					(unsigned long)(di + 1), name_len);
			name_hash = btrfs_name_hash(namebuf, name_len);
			if (key->offset != name_hash) {
345
				dir_item_err(leaf, slot,
346 347 348 349 350 351 352 353 354 355 356
		"name hash mismatch with key, have 0x%016x expect 0x%016llx",
					name_hash, key->offset);
				return -EUCLEAN;
			}
		}
		cur += total_size;
		di = (struct btrfs_dir_item *)((void *)di + total_size);
	}
	return 0;
}

357
__printf(3, 4)
358
__cold
359
static void block_group_err(const struct extent_buffer *eb, int slot,
360 361
			    const char *fmt, ...)
{
362
	const struct btrfs_fs_info *fs_info = eb->fs_info;
363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

381
static int check_block_group_item(struct extent_buffer *leaf,
382 383 384 385 386 387 388 389 390
				  struct btrfs_key *key, int slot)
{
	struct btrfs_block_group_item bgi;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 type;

	/*
	 * Here we don't really care about alignment since extent allocator can
391
	 * handle it.  We care more about the size.
392
	 */
393
	if (key->offset == 0) {
394
		block_group_err(leaf, slot,
395
				"invalid block group size 0");
396 397 398 399
		return -EUCLEAN;
	}

	if (item_size != sizeof(bgi)) {
400
		block_group_err(leaf, slot,
401 402 403 404 405 406 407 408 409
			"invalid item size, have %u expect %zu",
				item_size, sizeof(bgi));
		return -EUCLEAN;
	}

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	if (btrfs_block_group_chunk_objectid(&bgi) !=
	    BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
410
		block_group_err(leaf, slot,
411 412 413 414 415 416 417
		"invalid block group chunk objectid, have %llu expect %llu",
				btrfs_block_group_chunk_objectid(&bgi),
				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
		return -EUCLEAN;
	}

	if (btrfs_block_group_used(&bgi) > key->offset) {
418
		block_group_err(leaf, slot,
419 420 421 422 423 424 425
			"invalid block group used, have %llu expect [0, %llu)",
				btrfs_block_group_used(&bgi), key->offset);
		return -EUCLEAN;
	}

	flags = btrfs_block_group_flags(&bgi);
	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
426
		block_group_err(leaf, slot,
427 428 429 430 431 432 433 434 435 436 437 438
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
		return -EUCLEAN;
	}

	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	if (type != BTRFS_BLOCK_GROUP_DATA &&
	    type != BTRFS_BLOCK_GROUP_METADATA &&
	    type != BTRFS_BLOCK_GROUP_SYSTEM &&
	    type != (BTRFS_BLOCK_GROUP_METADATA |
			   BTRFS_BLOCK_GROUP_DATA)) {
439
		block_group_err(leaf, slot,
440
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
441 442 443 444 445 446 447
			type, hweight64(type),
			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
			BTRFS_BLOCK_GROUP_SYSTEM,
			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
		return -EUCLEAN;
	}
	return 0;
448 449
}

450
__printf(4, 5)
451
__cold
452
static void chunk_err(const struct extent_buffer *leaf,
453 454 455
		      const struct btrfs_chunk *chunk, u64 logical,
		      const char *fmt, ...)
{
456
	const struct btrfs_fs_info *fs_info = leaf->fs_info;
457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494
	bool is_sb;
	struct va_format vaf;
	va_list args;
	int i;
	int slot = -1;

	/* Only superblock eb is able to have such small offset */
	is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);

	if (!is_sb) {
		/*
		 * Get the slot number by iterating through all slots, this
		 * would provide better readability.
		 */
		for (i = 0; i < btrfs_header_nritems(leaf); i++) {
			if (btrfs_item_ptr_offset(leaf, i) ==
					(unsigned long)chunk) {
				slot = i;
				break;
			}
		}
	}
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_sb)
		btrfs_crit(fs_info,
		"corrupt superblock syschunk array: chunk_start=%llu, %pV",
			   logical, &vaf);
	else
		btrfs_crit(fs_info,
	"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
			   BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
			   logical, &vaf);
	va_end(args);
}

495 496 497
/*
 * The common chunk check which could also work on super block sys chunk array.
 *
 * Return -EUCLEAN if anything is corrupted.
 * Return 0 if everything is OK.
 */
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u64 features;
	bool mixed = false;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	if (!num_stripes) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes, have %u", num_stripes);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk logical, have %llu should aligned to %u",
			  logical, fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk sectorsize, have %u expect %u",
			  btrfs_chunk_sector_size(leaf, chunk),
			  fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk length, have %llu", length);
		return -EUCLEAN;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EUCLEAN;
	}
	/* No bits outside of type mask and profile mask may be set */
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		chunk_err(leaf, chunk, logical,
			  "unrecognized chunk type: 0x%llx",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EUCLEAN;
	}

	/* At most one RAID/DUP profile bit may be set */
	if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
			  type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}
	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
		chunk_err(leaf, chunk, logical,
	"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
			  type, BTRFS_BLOCK_GROUP_TYPE_MASK);
		return -EUCLEAN;
	}

	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
		chunk_err(leaf, chunk, logical,
			  "system chunk with data or metadata type: 0x%llx",
			  type);
		return -EUCLEAN;
	}

	features = btrfs_super_incompat_flags(fs_info->super_copy);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = true;

	if (!mixed) {
		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
		    (type & BTRFS_BLOCK_GROUP_DATA)) {
			chunk_err(leaf, chunk, logical,
			"mixed chunk type in non-mixed mode: 0x%llx", type);
			return -EUCLEAN;
		}
	}

	/* Stripe counts must satisfy the minimum of the chunk's profile */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
		chunk_err(leaf, chunk, logical,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}

	return 0;
}

609
__printf(3, 4)
Q
Qu Wenruo 已提交
610
__cold
611
static void dev_item_err(const struct extent_buffer *eb, int slot,
Q
Qu Wenruo 已提交
612 613 614 615 616 617 618 619 620 621 622 623
			 const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

624
	btrfs_crit(eb->fs_info,
Q
Qu Wenruo 已提交
625 626 627 628 629 630 631
	"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

632
static int check_dev_item(struct extent_buffer *leaf,
Q
Qu Wenruo 已提交
633 634
			  struct btrfs_key *key, int slot)
{
635
	struct btrfs_fs_info *fs_info = leaf->fs_info;
Q
Qu Wenruo 已提交
636 637 638 639
	struct btrfs_dev_item *ditem;
	u64 max_devid = max(BTRFS_MAX_DEVS(fs_info), BTRFS_MAX_DEVS_SYS_CHUNK);

	if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
640
		dev_item_err(leaf, slot,
Q
Qu Wenruo 已提交
641 642 643 644 645
			     "invalid objectid: has=%llu expect=%llu",
			     key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
		return -EUCLEAN;
	}
	if (key->offset > max_devid) {
646
		dev_item_err(leaf, slot,
Q
Qu Wenruo 已提交
647 648 649 650 651 652
			     "invalid devid: has=%llu expect=[0, %llu]",
			     key->offset, max_devid);
		return -EUCLEAN;
	}
	ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
	if (btrfs_device_id(leaf, ditem) != key->offset) {
653
		dev_item_err(leaf, slot,
Q
Qu Wenruo 已提交
654 655 656 657 658 659 660 661 662 663 664 665
			     "devid mismatch: key has=%llu item has=%llu",
			     key->offset, btrfs_device_id(leaf, ditem));
		return -EUCLEAN;
	}

	/*
	 * For device total_bytes, we don't have reliable way to check it, as
	 * it can be 0 for device removal. Device size check can only be done
	 * by dev extents check.
	 */
	if (btrfs_device_bytes_used(leaf, ditem) >
	    btrfs_device_total_bytes(leaf, ditem)) {
666
		dev_item_err(leaf, slot,
Q
Qu Wenruo 已提交
667 668 669 670 671 672 673 674 675 676 677 678
			     "invalid bytes used: have %llu expect [0, %llu]",
			     btrfs_device_bytes_used(leaf, ditem),
			     btrfs_device_total_bytes(leaf, ditem));
		return -EUCLEAN;
	}
	/*
	 * Remaining members like io_align/type/gen/dev_group aren't really
	 * utilized.  Skip them to make later usage of them easier.
	 */
	return 0;
}

/* Inode item error output has the same format as dir_item_err() */
#define inode_item_err(fs_info, eb, slot, fmt, ...)			\
	dir_item_err(eb, slot, fmt, __VA_ARGS__)

683
static int check_inode_item(struct extent_buffer *leaf,
684 685
			    struct btrfs_key *key, int slot)
{
686
	struct btrfs_fs_info *fs_info = leaf->fs_info;
687 688 689 690 691 692 693 694 695
	struct btrfs_inode_item *iitem;
	u64 super_gen = btrfs_super_generation(fs_info->super_copy);
	u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
	u32 mode;

	if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
	    key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
	    key->objectid != BTRFS_FREE_INO_OBJECTID) {
696
		generic_err(leaf, slot,
697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769
	"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
			    key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
			    BTRFS_FIRST_FREE_OBJECTID,
			    BTRFS_LAST_FREE_OBJECTID,
			    BTRFS_FREE_INO_OBJECTID);
		return -EUCLEAN;
	}
	if (key->offset != 0) {
		inode_item_err(fs_info, leaf, slot,
			"invalid key offset: has %llu expect 0",
			key->offset);
		return -EUCLEAN;
	}
	iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);

	/* Here we use super block generation + 1 to handle log tree */
	if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
		inode_item_err(fs_info, leaf, slot,
			"invalid inode generation: has %llu expect (0, %llu]",
			       btrfs_inode_generation(leaf, iitem),
			       super_gen + 1);
		return -EUCLEAN;
	}
	/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
	if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
		inode_item_err(fs_info, leaf, slot,
			"invalid inode generation: has %llu expect [0, %llu]",
			       btrfs_inode_transid(leaf, iitem), super_gen + 1);
		return -EUCLEAN;
	}

	/*
	 * For size and nbytes it's better not to be too strict, as for dir
	 * item its size/nbytes can easily get wrong, but doesn't affect
	 * anything in the fs. So here we skip the check.
	 */
	mode = btrfs_inode_mode(leaf, iitem);
	if (mode & ~valid_mask) {
		inode_item_err(fs_info, leaf, slot,
			       "unknown mode bit detected: 0x%x",
			       mode & ~valid_mask);
		return -EUCLEAN;
	}

	/*
	 * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
	 * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
	 * Only needs to check BLK, LNK and SOCKS
	 */
	if (!is_power_of_2(mode & S_IFMT)) {
		if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
			inode_item_err(fs_info, leaf, slot,
			"invalid mode: has 0%o expect valid S_IF* bit(s)",
				       mode & S_IFMT);
			return -EUCLEAN;
		}
	}
	if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
		inode_item_err(fs_info, leaf, slot,
		       "invalid nlink: has %u expect no more than 1 for dir",
			btrfs_inode_nlink(leaf, iitem));
		return -EUCLEAN;
	}
	if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
		inode_item_err(fs_info, leaf, slot,
			       "unknown flags detected: 0x%llx",
			       btrfs_inode_flags(leaf, iitem) &
			       ~BTRFS_INODE_FLAG_MASK);
		return -EUCLEAN;
	}
	return 0;
}

770 771 772
/*
 * Common point to switch the item-specific validation.
 */
773
static int check_leaf_item(struct extent_buffer *leaf,
774 775 776
			   struct btrfs_key *key, int slot)
{
	int ret = 0;
777
	struct btrfs_chunk *chunk;
778 779 780

	switch (key->type) {
	case BTRFS_EXTENT_DATA_KEY:
781
		ret = check_extent_data_item(leaf, key, slot);
782 783
		break;
	case BTRFS_EXTENT_CSUM_KEY:
784
		ret = check_csum_item(leaf, key, slot);
785
		break;
786 787 788
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_XATTR_ITEM_KEY:
789
		ret = check_dir_item(leaf, key, slot);
790
		break;
791
	case BTRFS_BLOCK_GROUP_ITEM_KEY:
792
		ret = check_block_group_item(leaf, key, slot);
793
		break;
794 795
	case BTRFS_CHUNK_ITEM_KEY:
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
796
		ret = btrfs_check_chunk_valid(leaf->fs_info, leaf, chunk,
797 798
					      key->offset);
		break;
Q
Qu Wenruo 已提交
799
	case BTRFS_DEV_ITEM_KEY:
800
		ret = check_dev_item(leaf, key, slot);
Q
Qu Wenruo 已提交
801
		break;
802
	case BTRFS_INODE_ITEM_KEY:
803
		ret = check_inode_item(leaf, key, slot);
804
		break;
805 806 807 808
	}
	return ret;
}

809
static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
810
{
811
	struct btrfs_fs_info *fs_info = leaf->fs_info;
812 813 814 815 816 817
	/* No valid key type is 0, so all key should be larger than this key */
	struct btrfs_key prev_key = {0, 0, 0};
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

818
	if (btrfs_header_level(leaf) != 0) {
819
		generic_err(leaf, 0,
820 821 822 823 824
			"invalid level for leaf, have %d expect 0",
			btrfs_header_level(leaf));
		return -EUCLEAN;
	}

825 826 827 828 829 830 831 832 833
	/*
	 * Extent buffers from a relocation tree have a owner field that
	 * corresponds to the subvolume tree they are based on. So just from an
	 * extent buffer alone we can not find out what is the id of the
	 * corresponding subvolume tree, so we can not figure out if the extent
	 * buffer corresponds to the root of the relocation tree or not. So
	 * skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
834
		u64 owner = btrfs_header_owner(leaf);
835 836
		struct btrfs_root *check_root;

837 838 839 840 841 842 843
		/* These trees must never be empty */
		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
		    owner == BTRFS_DEV_TREE_OBJECTID ||
		    owner == BTRFS_FS_TREE_OBJECTID ||
		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
844
			generic_err(leaf, 0,
845 846 847 848 849
			"invalid root, root %llu must never be empty",
				    owner);
			return -EUCLEAN;
		}
		key.objectid = owner;
850 851 852 853 854 855 856 857 858 859 860 861 862 863
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		check_root = btrfs_get_fs_root(fs_info, &key, false);
		/*
		 * The only reason we also check NULL here is that during
		 * open_ctree() some roots has not yet been set up.
		 */
		if (!IS_ERR_OR_NULL(check_root)) {
			struct extent_buffer *eb;

			eb = btrfs_root_node(check_root);
			/* if leaf is the root, then it's fine */
			if (leaf != eb) {
864
				generic_err(leaf, 0,
865 866
		"invalid nritems, have %u should not be 0 for non-root leaf",
					nritems);
867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896
				free_extent_buffer(eb);
				return -EUCLEAN;
			}
			free_extent_buffer(eb);
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/*
	 * Check the following things to make sure this is a good leaf, and
	 * leaf users won't need to bother with similar sanity checks:
	 *
	 * 1) key ordering
	 * 2) item offset and size
	 *    No overlap, no hole, all inside the leaf.
	 * 3) item content
	 *    If possible, do comprehensive sanity check.
	 *    NOTE: All checks must only rely on the item data itself.
	 */
	for (slot = 0; slot < nritems; slot++) {
		u32 item_end_expected;
		int ret;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
897
			generic_err(leaf, slot,
898 899 900 901
	"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
				prev_key.objectid, prev_key.type,
				prev_key.offset, key.objectid, key.type,
				key.offset);
902 903 904 905 906 907 908 909 910 911 912 913 914 915
			return -EUCLEAN;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (slot == 0)
			item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
		else
			item_end_expected = btrfs_item_offset_nr(leaf,
								 slot - 1);
		if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
916
			generic_err(leaf, slot,
917 918 919
				"unexpected item end, have %u expect %u",
				btrfs_item_end_nr(leaf, slot),
				item_end_expected);
920 921 922 923 924 925 926 927 928 929
			return -EUCLEAN;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent to each other, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
930
			generic_err(leaf, slot,
931 932 933
			"slot end outside of leaf, have %u expect range [0, %u]",
				btrfs_item_end_nr(leaf, slot),
				BTRFS_LEAF_DATA_SIZE(fs_info));
934 935 936 937 938 939
			return -EUCLEAN;
		}

		/* Also check if the item pointer overlaps with btrfs item. */
		if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
		    btrfs_item_ptr_offset(leaf, slot)) {
940
			generic_err(leaf, slot,
941 942 943 944
		"slot overlaps with its data, item end %lu data start %lu",
				btrfs_item_nr_offset(slot) +
				sizeof(struct btrfs_item),
				btrfs_item_ptr_offset(leaf, slot));
945 946 947
			return -EUCLEAN;
		}

948 949 950 951 952
		if (check_item_data) {
			/*
			 * Check if the item size and content meet other
			 * criteria
			 */
953
			ret = check_leaf_item(leaf, &key, slot);
954 955 956
			if (ret < 0)
				return ret;
		}
957 958 959 960 961 962 963 964 965

		prev_key.objectid = key.objectid;
		prev_key.type = key.type;
		prev_key.offset = key.offset;
	}

	return 0;
}

966 967
int btrfs_check_leaf_full(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *leaf)
968
{
969
	return check_leaf(leaf, true);
970 971
}

972
int btrfs_check_leaf_relaxed(struct btrfs_fs_info *fs_info,
973 974
			     struct extent_buffer *leaf)
{
975
	return check_leaf(leaf, false);
976 977
}

978
int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node)
979 980 981 982
{
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
983
	int level = btrfs_header_level(node);
984 985 986
	u64 bytenr;
	int ret = 0;

987
	if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
988
		generic_err(node, 0,
989 990 991 992
			"invalid level for node, have %d expect [1, %d]",
			level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
993 994
	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
		btrfs_crit(fs_info,
995
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
996
			   btrfs_header_owner(node), node->start,
997
			   nr == 0 ? "small" : "large", nr,
998
			   BTRFS_NODEPTRS_PER_BLOCK(fs_info));
999
		return -EUCLEAN;
1000 1001 1002 1003 1004 1005 1006 1007
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
1008
			generic_err(node, slot,
1009 1010 1011 1012
				"invalid NULL node pointer");
			ret = -EUCLEAN;
			goto out;
		}
1013
		if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
1014
			generic_err(node, slot,
1015
			"unaligned pointer, have %llu should be aligned to %u",
1016
				bytenr, fs_info->sectorsize);
1017
			ret = -EUCLEAN;
1018 1019 1020 1021
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
1022
			generic_err(node, slot,
1023 1024 1025 1026 1027
	"bad key order, current (%llu %u %llu) next (%llu %u %llu)",
				key.objectid, key.type, key.offset,
				next_key.objectid, next_key.type,
				next_key.offset);
			ret = -EUCLEAN;
1028 1029 1030 1031 1032 1033
			goto out;
		}
	}
out:
	return ret;
}