// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Qu Wenruo 2017.  All rights reserved.
 */

/*
 * The module is used to catch unexpected/corrupted tree block data.
 * Such behavior can be caused either by a fuzzed image or bugs.
 *
 * The objective is to do leaf/node validation checks when a tree block is read
 * from disk, and to check *every* possible member, so other code won't
 * need to check them again.
 *
 * Due to the potential damage of a false rejection, every checker needs to be
 * carefully reviewed so that it does not prevent the mount of valid images.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"
#include "misc.h"
#include "btrfs_inode.h"

/*
 * Error message should follow the following format:
 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
 * 		It's recommended to decode key.objectid/offset if it's
 * 		meaningful.
 * @reason:	describe the error
 * @bad_value:	optional, it's recommended to output bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only space is allowed
 * inside each component.
 */
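
/*
 * For illustration only (not a message produced verbatim by this file), a
 * report following the format above could look like:
 *
 *   corrupt leaf: root=5 block=29360128 slot=10, invalid key objectid, have 1 expect 256
 *
 * where "leaf" is the <type>, "root=5 block=29360128 slot=10" is the
 * <identifier>, and the rest is the <reason> plus the <bad_value>.
 */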

/*
 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
 * Allows callers to customize the output.
 */
__printf(3, 4)
__cold
static void generic_err(const struct extent_buffer *eb, int slot,
			const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		"corrupt %s: root=%llu block=%llu slot=%d, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
	va_end(args);
}

/*
 * Customized reporter for extent data item, since its key objectid and
 * offset have their own meaning.
 */
__printf(3, 4)
__cold
static void file_extent_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

/*
 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment
 * Else return 1
 */
#define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment)		      \
({									      \
	if (unlikely(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)),      \
				 (alignment))))				      \
		file_extent_err((leaf), (slot),				      \
	"invalid %s for file extent, have %llu, should be aligned to %u",     \
			(#name), btrfs_file_extent_##name((leaf), (fi)),      \
			(alignment));					      \
	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));   \
})
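
/*
 * Illustrative usage of the macro above: CHECK_FE_ALIGNED(leaf, slot, fi,
 * disk_bytenr, sectorsize) logs a file_extent_err() and evaluates to a
 * non-zero value when the extent's disk_bytenr is not sector aligned.
 */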

static u64 file_extent_end(struct extent_buffer *leaf,
			   struct btrfs_key *key,
			   struct btrfs_file_extent_item *extent)
{
	u64 end;
	u64 len;

	if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_ram_bytes(leaf, extent);
		end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
	} else {
		len = btrfs_file_extent_num_bytes(leaf, extent);
		end = key->offset + len;
	}
	return end;
}

/*
 * Customized report for dir_item, the only new important information is
 * key->objectid, which represents the inode number
 */
__printf(3, 4)
__cold
static void dir_item_err(const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

/*
 * This function checks prev_key->objectid, to ensure the current key and prev_key
 * share the same objectid as inode number.
 *
 * This is to detect missing INODE_ITEM in subvolume trees.
 *
 * Return true if everything is OK or we don't need to check.
 * Return false if anything is wrong.
 */
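/*
 * Hypothetical example: if slot N of a subvolume leaf holds key
 * (257 EXTENT_DATA 0) while slot N - 1 holds (256 XATTR_ITEM <hash>), then
 * the (257 INODE_ITEM 0) key that must sort before any other item of inode
 * 257 is missing, and the objectid mismatch exposes the corruption.
 */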
static bool check_prev_ino(struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot,
			   struct btrfs_key *prev_key)
{
	/* No prev key, skip check */
	if (slot == 0)
		return true;

	/* Only these key->types need to be checked */
	ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
	       key->type == BTRFS_INODE_REF_KEY ||
	       key->type == BTRFS_DIR_INDEX_KEY ||
	       key->type == BTRFS_DIR_ITEM_KEY ||
	       key->type == BTRFS_EXTENT_DATA_KEY);

	/*
	 * Only subvolume trees along with their reloc trees need this check.
	 * Things like the log tree don't follow this ino requirement.
	 */
	if (!is_fstree(btrfs_header_owner(leaf)))
		return true;

	if (key->objectid == prev_key->objectid)
		return true;

	/* Error found */
	dir_item_err(leaf, slot,
		"invalid previous key objectid, have %llu expect %llu",
		prev_key->objectid, key->objectid);
	return false;
}
static int check_extent_data_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot,
				  struct btrfs_key *prev_key)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_file_extent_item *fi;
	u32 sectorsize = fs_info->sectorsize;
	u32 item_size = btrfs_item_size(leaf, slot);
	u64 extent_end;

	if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
		file_extent_err(leaf, slot,
"unaligned file_offset for file extent, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}

	/*
	 * Previous key must have the same key->objectid (ino).
	 * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
	 * But if objectids mismatch, it means we have a missing
	 * INODE_ITEM.
	 */
	if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
		return -EUCLEAN;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	/*
	 * Make sure the item contains at least inline header, so the file
	 * extent type is not some garbage.
	 */
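	/*
	 * (BTRFS_FILE_EXTENT_INLINE_DATA_START is the offset of disk_bytenr
	 * inside struct btrfs_file_extent_item, i.e. the portion of the item
	 * that every file extent type must at least contain.)
	 */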
	if (unlikely(item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START)) {
		file_extent_err(leaf, slot,
				"invalid item size, have %u expect [%zu, %u)",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
				SZ_4K);
		return -EUCLEAN;
	}
	if (unlikely(btrfs_file_extent_type(leaf, fi) >=
		     BTRFS_NR_FILE_EXTENT_TYPES)) {
		file_extent_err(leaf, slot,
		"invalid type for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_type(leaf, fi),
			BTRFS_NR_FILE_EXTENT_TYPES - 1);
		return -EUCLEAN;
	}

	/*
	 * Support for new compression/encryption must introduce incompat flag,
	 * and must be caught in open_ctree().
	 */
	if (unlikely(btrfs_file_extent_compression(leaf, fi) >=
		     BTRFS_NR_COMPRESS_TYPES)) {
		file_extent_err(leaf, slot,
	"invalid compression for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_compression(leaf, fi),
			BTRFS_NR_COMPRESS_TYPES - 1);
		return -EUCLEAN;
	}
	if (unlikely(btrfs_file_extent_encryption(leaf, fi))) {
		file_extent_err(leaf, slot,
			"invalid encryption for file extent, have %u expect 0",
			btrfs_file_extent_encryption(leaf, fi));
		return -EUCLEAN;
	}
	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		/* Inline extent must have 0 as key offset */
		if (unlikely(key->offset)) {
			file_extent_err(leaf, slot,
		"invalid file_offset for inline file extent, have %llu expect 0",
				key->offset);
			return -EUCLEAN;
		}

		/* Compressed inline extent has no on-disk size, skip it */
		if (btrfs_file_extent_compression(leaf, fi) !=
		    BTRFS_COMPRESS_NONE)
			return 0;

		/* Uncompressed inline extent size must match item size */
		if (unlikely(item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
					  btrfs_file_extent_ram_bytes(leaf, fi))) {
			file_extent_err(leaf, slot,
	"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
				btrfs_file_extent_ram_bytes(leaf, fi));
			return -EUCLEAN;
		}
		return 0;
	}

	/* Regular or preallocated extent has fixed item size */
	if (unlikely(item_size != sizeof(*fi))) {
		file_extent_err(leaf, slot,
	"invalid item size for reg/prealloc file extent, have %u expect %zu",
			item_size, sizeof(*fi));
		return -EUCLEAN;
	}
	if (unlikely(CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
		     CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
		     CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
		     CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
		     CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize)))
		return -EUCLEAN;

	/* Catch extent end overflow */
	if (unlikely(check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
					key->offset, &extent_end))) {
		file_extent_err(leaf, slot,
	"extent end overflow, have file offset %llu extent num bytes %llu",
				key->offset,
				btrfs_file_extent_num_bytes(leaf, fi));
		return -EUCLEAN;
	}

	/*
	 * Check that no two consecutive file extent items, in the same leaf,
	 * present ranges that overlap each other.
	 */
	if (slot > 0 &&
	    prev_key->objectid == key->objectid &&
	    prev_key->type == BTRFS_EXTENT_DATA_KEY) {
		struct btrfs_file_extent_item *prev_fi;
		u64 prev_end;

		prev_fi = btrfs_item_ptr(leaf, slot - 1,
					 struct btrfs_file_extent_item);
		prev_end = file_extent_end(leaf, prev_key, prev_fi);
		if (unlikely(prev_end > key->offset)) {
			file_extent_err(leaf, slot - 1,
"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
					prev_end, key->offset);
			return -EUCLEAN;
		}
	}

	return 0;
}

static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot, struct btrfs_key *prev_key)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	const u32 csumsize = fs_info->csum_size;

	if (unlikely(key->objectid != BTRFS_EXTENT_CSUM_OBJECTID)) {
		generic_err(leaf, slot,
		"invalid key objectid for csum item, have %llu expect %llu",
			key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
		return -EUCLEAN;
	}
	if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
		generic_err(leaf, slot,
	"unaligned key offset for csum item, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}
	if (unlikely(!IS_ALIGNED(btrfs_item_size(leaf, slot), csumsize))) {
		generic_err(leaf, slot,
	"unaligned item size for csum item, have %u should be aligned to %u",
			btrfs_item_size(leaf, slot), csumsize);
		return -EUCLEAN;
	}
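	/*
	 * Each csum item covers (item_size / csum_size) sectors starting at
	 * key->offset.  As a hypothetical example, with 4K sectors and 4 byte
	 * crc32c checksums a 16 byte item covers 16K of data; that is used
	 * below to ensure the previous csum item does not overlap this one.
	 */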
	if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) {
		u64 prev_csum_end;
		u32 prev_item_size;

		prev_item_size = btrfs_item_size(leaf, slot - 1);
		prev_csum_end = (prev_item_size / csumsize) * sectorsize;
		prev_csum_end += prev_key->offset;
		if (unlikely(prev_csum_end > key->offset)) {
			generic_err(leaf, slot - 1,
"csum end range (%llu) goes beyond the start range (%llu) of the next csum item",
				    prev_csum_end, key->offset);
			return -EUCLEAN;
		}
	}
	return 0;
}

/* Inode item error output has the same format as dir_item_err() */
#define inode_item_err(eb, slot, fmt, ...)			\
	dir_item_err(eb, slot, fmt, __VA_ARGS__)

static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	struct btrfs_key item_key;
	bool is_inode_item;

	btrfs_item_key_to_cpu(leaf, &item_key, slot);
	is_inode_item = (item_key.type == BTRFS_INODE_ITEM_KEY);

	/* For XATTR_ITEM, location key should be all 0 */
	if (item_key.type == BTRFS_XATTR_ITEM_KEY) {
		if (unlikely(key->objectid != 0 || key->type != 0 ||
			     key->offset != 0))
			return -EUCLEAN;
		return 0;
	}

	if (unlikely((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
		      key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
		     key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
		     key->objectid != BTRFS_FREE_INO_OBJECTID)) {
		if (is_inode_item) {
			generic_err(leaf, slot,
	"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
				key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
				BTRFS_FIRST_FREE_OBJECTID,
				BTRFS_LAST_FREE_OBJECTID,
				BTRFS_FREE_INO_OBJECTID);
		} else {
			dir_item_err(leaf, slot,
"invalid location key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
				key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
				BTRFS_FIRST_FREE_OBJECTID,
				BTRFS_LAST_FREE_OBJECTID,
				BTRFS_FREE_INO_OBJECTID);
		}
		return -EUCLEAN;
	}
	if (unlikely(key->offset != 0)) {
		if (is_inode_item)
			inode_item_err(leaf, slot,
				       "invalid key offset: has %llu expect 0",
				       key->offset);
		else
			dir_item_err(leaf, slot,
				"invalid location key offset:has %llu expect 0",
				key->offset);
		return -EUCLEAN;
	}
	return 0;
}

static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
			  int slot)
{
	struct btrfs_key item_key;
	bool is_root_item;

	btrfs_item_key_to_cpu(leaf, &item_key, slot);
	is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);

	/* No such tree id */
	if (unlikely(key->objectid == 0)) {
		if (is_root_item)
			generic_err(leaf, slot, "invalid root id 0");
		else
			dir_item_err(leaf, slot,
				     "invalid location key root id 0");
		return -EUCLEAN;
	}

	/* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */
	if (unlikely(!is_fstree(key->objectid) && !is_root_item)) {
		dir_item_err(leaf, slot,
		"invalid location key objectid, have %llu expect [%llu, %llu]",
				key->objectid, BTRFS_FIRST_FREE_OBJECTID,
				BTRFS_LAST_FREE_OBJECTID);
		return -EUCLEAN;
	}

	/*
	 * ROOT_ITEM with non-zero offset means this is a snapshot, created at
	 * @offset transid.
	 * Furthermore, for location key in DIR_ITEM, its offset is always -1.
	 *
	 * So here we only check offset for reloc tree whose key->offset must
	 * be a valid tree.
	 */
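	/*
	 * As a hypothetical example, the reloc tree of subvolume 257 is keyed
	 * (TREE_RELOC_OBJECTID, ROOT_ITEM, 257); an offset of 0 can never
	 * refer to a valid source tree, hence the check below.
	 */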
	if (unlikely(key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
		     key->offset == 0)) {
		generic_err(leaf, slot, "invalid root id 0 for reloc tree");
		return -EUCLEAN;
	}
	return 0;
}

static int check_dir_item(struct extent_buffer *leaf,
			  struct btrfs_key *key, struct btrfs_key *prev_key,
			  int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_dir_item *di;
	u32 item_size = btrfs_item_size(leaf, slot);
	u32 cur = 0;

	if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
		return -EUCLEAN;

	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
	while (cur < item_size) {
		struct btrfs_key location_key;
		u32 name_len;
		u32 data_len;
		u32 max_name_len;
		u32 total_size;
		u32 name_hash;
		u8 dir_type;
		int ret;

		/* header itself should not cross item boundary */
		if (unlikely(cur + sizeof(*di) > item_size)) {
			dir_item_err(leaf, slot,
		"dir item header crosses item boundary, have %zu boundary %u",
				cur + sizeof(*di), item_size);
			return -EUCLEAN;
		}

		/* Location key check */
		btrfs_dir_item_key_to_cpu(leaf, di, &location_key);
		if (location_key.type == BTRFS_ROOT_ITEM_KEY) {
			ret = check_root_key(leaf, &location_key, slot);
			if (unlikely(ret < 0))
				return ret;
		} else if (location_key.type == BTRFS_INODE_ITEM_KEY ||
			   location_key.type == 0) {
			ret = check_inode_key(leaf, &location_key, slot);
			if (unlikely(ret < 0))
				return ret;
		} else {
			dir_item_err(leaf, slot,
			"invalid location key type, have %u, expect %u or %u",
				     location_key.type, BTRFS_ROOT_ITEM_KEY,
				     BTRFS_INODE_ITEM_KEY);
			return -EUCLEAN;
		}

		/* dir type check */
		dir_type = btrfs_dir_type(leaf, di);
		if (unlikely(dir_type >= BTRFS_FT_MAX)) {
			dir_item_err(leaf, slot,
			"invalid dir item type, have %u expect [0, %u)",
				dir_type, BTRFS_FT_MAX);
			return -EUCLEAN;
		}

		if (unlikely(key->type == BTRFS_XATTR_ITEM_KEY &&
			     dir_type != BTRFS_FT_XATTR)) {
			dir_item_err(leaf, slot,
		"invalid dir item type for XATTR key, have %u expect %u",
				dir_type, BTRFS_FT_XATTR);
			return -EUCLEAN;
		}
		if (unlikely(dir_type == BTRFS_FT_XATTR &&
			     key->type != BTRFS_XATTR_ITEM_KEY)) {
			dir_item_err(leaf, slot,
			"xattr dir type found for non-XATTR key");
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR)
			max_name_len = XATTR_NAME_MAX;
		else
			max_name_len = BTRFS_NAME_LEN;

		/* Name/data length check */
		name_len = btrfs_dir_name_len(leaf, di);
		data_len = btrfs_dir_data_len(leaf, di);
		if (unlikely(name_len > max_name_len)) {
			dir_item_err(leaf, slot,
			"dir item name len too long, have %u max %u",
				name_len, max_name_len);
			return -EUCLEAN;
		}
		if (unlikely(name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info))) {
			dir_item_err(leaf, slot,
			"dir item name and data len too long, have %u max %u",
				name_len + data_len,
				BTRFS_MAX_XATTR_SIZE(fs_info));
			return -EUCLEAN;
		}

		if (unlikely(data_len && dir_type != BTRFS_FT_XATTR)) {
			dir_item_err(leaf, slot,
			"dir item with invalid data len, have %u expect 0",
				data_len);
			return -EUCLEAN;
		}

		total_size = sizeof(*di) + name_len + data_len;

		/* header and name/data should not cross item boundary */
		if (unlikely(cur + total_size > item_size)) {
			dir_item_err(leaf, slot,
		"dir item data crosses item boundary, have %u boundary %u",
				cur + total_size, item_size);
			return -EUCLEAN;
		}

		/*
		 * Special check for XATTR/DIR_ITEM, as key->offset is name
		 * hash, should match its name
		 */
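		/*
		 * (btrfs_name_hash() is a crc32c based hash of the name
		 * bytes, so the expected key->offset can be recomputed from
		 * the on-disk name and compared against the key.)
		 */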
		if (key->type == BTRFS_DIR_ITEM_KEY ||
		    key->type == BTRFS_XATTR_ITEM_KEY) {
			char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];

			read_extent_buffer(leaf, namebuf,
					(unsigned long)(di + 1), name_len);
			name_hash = btrfs_name_hash(namebuf, name_len);
			if (unlikely(key->offset != name_hash)) {
				dir_item_err(leaf, slot,
		"name hash mismatch with key, have 0x%016x expect 0x%016llx",
					name_hash, key->offset);
				return -EUCLEAN;
			}
		}
		cur += total_size;
		di = (struct btrfs_dir_item *)((void *)di + total_size);
	}
	return 0;
}

__printf(3, 4)
__cold
static void block_group_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

static int check_block_group_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_block_group_item bgi;
	u32 item_size = btrfs_item_size(leaf, slot);
	u64 chunk_objectid;
	u64 flags;
	u64 type;

	/*
	 * Here we don't really care about alignment since extent allocator can
	 * handle it.  We care more about the size.
	 */
	if (unlikely(key->offset == 0)) {
		block_group_err(leaf, slot,
				"invalid block group size 0");
		return -EUCLEAN;
	}

	if (unlikely(item_size != sizeof(bgi))) {
		block_group_err(leaf, slot,
			"invalid item size, have %u expect %zu",
				item_size, sizeof(bgi));
		return -EUCLEAN;
	}

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	chunk_objectid = btrfs_stack_block_group_chunk_objectid(&bgi);
	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		/*
		 * We don't init the nr_global_roots until we load the global
		 * roots, so this could be 0 at mount time.  If it's 0 we'll
		 * just assume we're fine, and later we'll check against our
		 * actual value.
		 */
		if (unlikely(fs_info->nr_global_roots &&
			     chunk_objectid >= fs_info->nr_global_roots)) {
			block_group_err(leaf, slot,
	"invalid block group global root id, have %llu, needs to be <= %llu",
					chunk_objectid,
					fs_info->nr_global_roots);
			return -EUCLEAN;
		}
	} else if (unlikely(chunk_objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
		block_group_err(leaf, slot,
		"invalid block group chunk objectid, have %llu expect %llu",
				btrfs_stack_block_group_chunk_objectid(&bgi),
				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
		return -EUCLEAN;
	}

	if (unlikely(btrfs_stack_block_group_used(&bgi) > key->offset)) {
		block_group_err(leaf, slot,
			"invalid block group used, have %llu expect [0, %llu)",
				btrfs_stack_block_group_used(&bgi), key->offset);
		return -EUCLEAN;
	}

	flags = btrfs_stack_block_group_flags(&bgi);
	if (unlikely(hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1)) {
		block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
		return -EUCLEAN;
	}

	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	if (unlikely(type != BTRFS_BLOCK_GROUP_DATA &&
		     type != BTRFS_BLOCK_GROUP_METADATA &&
		     type != BTRFS_BLOCK_GROUP_SYSTEM &&
		     type != (BTRFS_BLOCK_GROUP_METADATA |
			      BTRFS_BLOCK_GROUP_DATA))) {
		block_group_err(leaf, slot,
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
			type, hweight64(type),
			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
			BTRFS_BLOCK_GROUP_SYSTEM,
			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
		return -EUCLEAN;
	}
	return 0;
}

__printf(4, 5)
__cold
static void chunk_err(const struct extent_buffer *leaf,
		      const struct btrfs_chunk *chunk, u64 logical,
		      const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = leaf->fs_info;
	bool is_sb;
	struct va_format vaf;
	va_list args;
	int i;
	int slot = -1;

	/* Only superblock eb is able to have such small offset */
	is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);

	if (!is_sb) {
		/*
		 * Get the slot number by iterating through all slots, this
		 * would provide better readability.
		 */
		for (i = 0; i < btrfs_header_nritems(leaf); i++) {
			if (btrfs_item_ptr_offset(leaf, i) ==
					(unsigned long)chunk) {
				slot = i;
				break;
			}
		}
	}
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_sb)
		btrfs_crit(fs_info,
		"corrupt superblock syschunk array: chunk_start=%llu, %pV",
			   logical, &vaf);
	else
		btrfs_crit(fs_info,
	"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
			   BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
			   logical, &vaf);
	va_end(args);
}

/*
 * The common chunk check which could also work on super block sys chunk array.
 *
 * Return -EUCLEAN if anything is corrupted.
 * Return 0 if everything is OK.
 */
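/*
 * As an illustration (constraints taken from btrfs_raid_array), a RAID1
 * chunk must have exactly 2 stripes, a RAID10 chunk must have
 * sub_stripes == 2, and a RAID5 chunk needs at least 2 stripes; the
 * profile specific checks below enforce such limits.
 */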
int btrfs_check_chunk_valid(struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk, u64 logical)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	u64 length;
	u64 chunk_end;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u64 features;
	bool mixed = false;
	int raid_index;
	int nparity;
	int ncopies;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);
	raid_index = btrfs_bg_flags_to_raid_index(type);
	ncopies = btrfs_raid_array[raid_index].ncopies;
	nparity = btrfs_raid_array[raid_index].nparity;

	if (unlikely(!num_stripes)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes, have %u", num_stripes);
		return -EUCLEAN;
	}
	if (unlikely(num_stripes < ncopies)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes < ncopies, have %u < %d",
			  num_stripes, ncopies);
		return -EUCLEAN;
	}
	if (unlikely(nparity && num_stripes == nparity)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes == nparity, have %u == %d",
			  num_stripes, nparity);
		return -EUCLEAN;
	}
	if (unlikely(!IS_ALIGNED(logical, fs_info->sectorsize))) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk logical, have %llu should aligned to %u",
			  logical, fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (unlikely(btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk sectorsize, have %u expect %u",
			  btrfs_chunk_sector_size(leaf, chunk),
			  fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (unlikely(!length || !IS_ALIGNED(length, fs_info->sectorsize))) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk length, have %llu", length);
		return -EUCLEAN;
	}
	if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
		chunk_err(leaf, chunk, logical,
"invalid chunk logical start and length, have logical start %llu length %llu",
			  logical, length);
		return -EUCLEAN;
	}
	if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EUCLEAN;
	}
	if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			      BTRFS_BLOCK_GROUP_PROFILE_MASK))) {
		chunk_err(leaf, chunk, logical,
			  "unrecognized chunk type: 0x%llx",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EUCLEAN;
	}

	if (unlikely(!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
		     (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0)) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
			  type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}
	if (unlikely((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0)) {
		chunk_err(leaf, chunk, logical,
	"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
			  type, BTRFS_BLOCK_GROUP_TYPE_MASK);
		return -EUCLEAN;
	}

	if (unlikely((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
		     (type & (BTRFS_BLOCK_GROUP_METADATA |
			      BTRFS_BLOCK_GROUP_DATA)))) {
		chunk_err(leaf, chunk, logical,
			  "system chunk with data or metadata type: 0x%llx",
			  type);
		return -EUCLEAN;
	}

	features = btrfs_super_incompat_flags(fs_info->super_copy);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = true;

	if (!mixed) {
		if (unlikely((type & BTRFS_BLOCK_GROUP_METADATA) &&
			     (type & BTRFS_BLOCK_GROUP_DATA))) {
			chunk_err(leaf, chunk, logical,
			"mixed chunk type in non-mixed mode: 0x%llx", type);
			return -EUCLEAN;
		}
	}

	if (unlikely((type & BTRFS_BLOCK_GROUP_RAID10 &&
		      sub_stripes != btrfs_raid_array[BTRFS_RAID_RAID10].sub_stripes) ||
		     (type & BTRFS_BLOCK_GROUP_RAID1 &&
		      num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1].devs_min) ||
		     (type & BTRFS_BLOCK_GROUP_RAID1C3 &&
		      num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C3].devs_min) ||
		     (type & BTRFS_BLOCK_GROUP_RAID1C4 &&
		      num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C4].devs_min) ||
		     (type & BTRFS_BLOCK_GROUP_RAID5 &&
		      num_stripes < btrfs_raid_array[BTRFS_RAID_RAID5].devs_min) ||
		     (type & BTRFS_BLOCK_GROUP_RAID6 &&
		      num_stripes < btrfs_raid_array[BTRFS_RAID_RAID6].devs_min) ||
		     (type & BTRFS_BLOCK_GROUP_DUP &&
		      num_stripes != btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes) ||
		     ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
		      num_stripes != btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes))) {
		chunk_err(leaf, chunk, logical,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}

	return 0;
}

/*
 * Enhanced version of chunk item checker.
 *
 * The common btrfs_check_chunk_valid() doesn't check item size since it needs
 * to work on super block sys_chunk_array which doesn't have full item ptr.
 */
static int check_leaf_chunk_item(struct extent_buffer *leaf,
				 struct btrfs_chunk *chunk,
				 struct btrfs_key *key, int slot)
{
	int num_stripes;

	if (unlikely(btrfs_item_size(leaf, slot) < sizeof(struct btrfs_chunk))) {
		chunk_err(leaf, chunk, key->offset,
			"invalid chunk item size: have %u expect [%zu, %u)",
			btrfs_item_size(leaf, slot),
			sizeof(struct btrfs_chunk),
			BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
		return -EUCLEAN;
	}

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Let btrfs_check_chunk_valid() handle this error type */
	if (num_stripes == 0)
		goto out;

	if (unlikely(btrfs_chunk_item_size(num_stripes) !=
		     btrfs_item_size(leaf, slot))) {
		chunk_err(leaf, chunk, key->offset,
			"invalid chunk item size: have %u expect %lu",
			btrfs_item_size(leaf, slot),
			btrfs_chunk_item_size(num_stripes));
		return -EUCLEAN;
	}
out:
	return btrfs_check_chunk_valid(leaf, chunk, key->offset);
}

__printf(3, 4)
__cold
static void dev_item_err(const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(eb->fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

static int check_dev_item(struct extent_buffer *leaf,
			  struct btrfs_key *key, int slot)
{
	struct btrfs_dev_item *ditem;
	const u32 item_size = btrfs_item_size(leaf, slot);

	if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
		dev_item_err(leaf, slot,
			     "invalid objectid: has=%llu expect=%llu",
			     key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
		return -EUCLEAN;
	}

	if (unlikely(item_size != sizeof(*ditem))) {
		dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
			     item_size, sizeof(*ditem));
		return -EUCLEAN;
	}

	ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
	if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
		dev_item_err(leaf, slot,
			     "devid mismatch: key has=%llu item has=%llu",
			     key->offset, btrfs_device_id(leaf, ditem));
		return -EUCLEAN;
	}

	/*
	 * For device total_bytes, we don't have reliable way to check it, as
	 * it can be 0 for device removal. Device size check can only be done
	 * by dev extents check.
	 */
	if (unlikely(btrfs_device_bytes_used(leaf, ditem) >
		     btrfs_device_total_bytes(leaf, ditem))) {
		dev_item_err(leaf, slot,
			     "invalid bytes used: have %llu expect [0, %llu]",
			     btrfs_device_bytes_used(leaf, ditem),
			     btrfs_device_total_bytes(leaf, ditem));
		return -EUCLEAN;
	}
	/*
	 * Remaining members like io_align/type/gen/dev_group aren't really
	 * utilized.  Skip them to make later usage of them easier.
	 */
	return 0;
}

static int check_inode_item(struct extent_buffer *leaf,
			    struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_inode_item *iitem;
	u64 super_gen = btrfs_super_generation(fs_info->super_copy);
	u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
	const u32 item_size = btrfs_item_size(leaf, slot);
	u32 mode;
	int ret;
	u32 flags;
	u32 ro_flags;

	ret = check_inode_key(leaf, key, slot);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(item_size != sizeof(*iitem))) {
		generic_err(leaf, slot, "invalid item size: has %u expect %zu",
			    item_size, sizeof(*iitem));
		return -EUCLEAN;
	}

	iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);

	/* Here we use super block generation + 1 to handle log tree */
	if (unlikely(btrfs_inode_generation(leaf, iitem) > super_gen + 1)) {
		inode_item_err(leaf, slot,
			"invalid inode generation: has %llu expect (0, %llu]",
			       btrfs_inode_generation(leaf, iitem),
			       super_gen + 1);
		return -EUCLEAN;
	}
	/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
	if (unlikely(btrfs_inode_transid(leaf, iitem) > super_gen + 1)) {
		inode_item_err(leaf, slot,
			"invalid inode transid: has %llu expect [0, %llu]",
			       btrfs_inode_transid(leaf, iitem), super_gen + 1);
		return -EUCLEAN;
	}

	/*
	 * For size and nbytes it's better not to be too strict, as for a dir
	 * item its size/nbytes can easily get out of sync, but that doesn't
	 * affect anything in the fs. So here we skip the check.
	 */
	mode = btrfs_inode_mode(leaf, iitem);
	if (unlikely(mode & ~valid_mask)) {
		inode_item_err(leaf, slot,
			       "unknown mode bit detected: 0x%x",
			       mode & ~valid_mask);
		return -EUCLEAN;
	}

	/*
	 * S_IFMT is not bit mapped so we can't completely rely on
	 * is_power_of_2/has_single_bit_set, but it can save us from checking
	 * FIFO/CHR/DIR/REG.  Only need to check BLK, LNK and SOCK.
	 */
	if (!has_single_bit_set(mode & S_IFMT)) {
		if (unlikely(!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode))) {
			inode_item_err(leaf, slot,
			"invalid mode: has 0%o expect valid S_IF* bit(s)",
				       mode & S_IFMT);
			return -EUCLEAN;
		}
	}
	if (unlikely(S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1)) {
		inode_item_err(leaf, slot,
		       "invalid nlink: has %u expect no more than 1 for dir",
			btrfs_inode_nlink(leaf, iitem));
		return -EUCLEAN;
	}
	btrfs_inode_split_flags(btrfs_inode_flags(leaf, iitem), &flags, &ro_flags);
	if (unlikely(flags & ~BTRFS_INODE_FLAG_MASK)) {
		inode_item_err(leaf, slot,
			       "unknown incompat flags detected: 0x%x", flags);
		return -EUCLEAN;
	}
	if (unlikely(!sb_rdonly(fs_info->sb) &&
		     (ro_flags & ~BTRFS_INODE_RO_FLAG_MASK))) {
		inode_item_err(leaf, slot,
			"unknown ro-compat flags detected on writeable mount: 0x%x",
			ro_flags);
		return -EUCLEAN;
	}
	return 0;
}

static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_root_item ri = { 0 };
	const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
				     BTRFS_ROOT_SUBVOL_DEAD;
	int ret;

	ret = check_root_key(leaf, key, slot);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(btrfs_item_size(leaf, slot) != sizeof(ri) &&
		     btrfs_item_size(leaf, slot) !=
		     btrfs_legacy_root_item_size())) {
		generic_err(leaf, slot,
			    "invalid root item size, have %u expect %zu or %u",
			    btrfs_item_size(leaf, slot), sizeof(ri),
			    btrfs_legacy_root_item_size());
		return -EUCLEAN;
	}

	/*
	 * For legacy root item, the members starting at generation_v2 will be
	 * all filled with 0.
	 * And since we allow generation_v2 as 0, it will still pass the check.
	 */
	read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
			   btrfs_item_size(leaf, slot));

	/* Generation related */
	if (unlikely(btrfs_root_generation(&ri) >
		     btrfs_super_generation(fs_info->super_copy) + 1)) {
		generic_err(leaf, slot,
			"invalid root generation, have %llu expect (0, %llu]",
			    btrfs_root_generation(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (unlikely(btrfs_root_generation_v2(&ri) >
		     btrfs_super_generation(fs_info->super_copy) + 1)) {
		generic_err(leaf, slot,
		"invalid root v2 generation, have %llu expect (0, %llu]",
			    btrfs_root_generation_v2(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (unlikely(btrfs_root_last_snapshot(&ri) >
		     btrfs_super_generation(fs_info->super_copy) + 1)) {
		generic_err(leaf, slot,
		"invalid root last_snapshot, have %llu expect (0, %llu]",
			    btrfs_root_last_snapshot(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}

	/* Alignment and level check */
	if (unlikely(!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize))) {
		generic_err(leaf, slot,
		"invalid root bytenr, have %llu expect to be aligned to %u",
			    btrfs_root_bytenr(&ri), fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (unlikely(btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL)) {
		generic_err(leaf, slot,
			    "invalid root level, have %u expect [0, %u]",
			    btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
	if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) {
		generic_err(leaf, slot,
			    "invalid root level, have %u expect [0, %u]",
			    btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/* Flags check */
	if (unlikely(btrfs_root_flags(&ri) & ~valid_root_flags)) {
		generic_err(leaf, slot,
			    "invalid root flags, have 0x%llx expect mask 0x%llx",
			    btrfs_root_flags(&ri), valid_root_flags);
		return -EUCLEAN;
	}
	return 0;
}

__printf(3,4)
__cold
static void extent_err(const struct extent_buffer *eb, int slot,
		       const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;
	u64 bytenr;
	u64 len;

	btrfs_item_key_to_cpu(eb, &key, slot);
	bytenr = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY ||
	    key.type == BTRFS_TREE_BLOCK_REF_KEY ||
	    key.type == BTRFS_SHARED_BLOCK_REF_KEY)
		len = eb->fs_info->nodesize;
	else
		len = key.offset;
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(eb->fs_info,
	"corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		eb->start, slot, bytenr, len, &vaf);
	va_end(args);
}

static int check_extent_item(struct extent_buffer *leaf,
			     struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	bool is_tree_block = false;
	unsigned long ptr;	/* Current pointer inside inline refs */
	unsigned long end;	/* Extent item end */
	const u32 item_size = btrfs_item_size(leaf, slot);
	u64 flags;
	u64 generation;
	u64 total_refs;		/* Total refs in btrfs_extent_item */
	u64 inline_refs = 0;	/* found total inline refs */

	if (unlikely(key->type == BTRFS_METADATA_ITEM_KEY &&
		     !btrfs_fs_incompat(fs_info, SKINNY_METADATA))) {
		generic_err(leaf, slot,
"invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
		return -EUCLEAN;
	}
	/* key->objectid is the bytenr for both key types */
	if (unlikely(!IS_ALIGNED(key->objectid, fs_info->sectorsize))) {
		generic_err(leaf, slot,
		"invalid key objectid, have %llu expect to be aligned to %u",
			   key->objectid, fs_info->sectorsize);
		return -EUCLEAN;
	}

	/* key->offset is tree level for METADATA_ITEM_KEY */
	if (unlikely(key->type == BTRFS_METADATA_ITEM_KEY &&
		     key->offset >= BTRFS_MAX_LEVEL)) {
		extent_err(leaf, slot,
			   "invalid tree level, have %llu expect [0, %u]",
			   key->offset, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/*
	 * EXTENT/METADATA_ITEM consists of:
	 * 1) One btrfs_extent_item
	 *    Records the total refs, type and generation of the extent.
	 *
	 * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only)
	 *    Records the first key and level of the tree block.
	 *
	 * 3) Zero or more btrfs_extent_inline_ref(s)
	 *    Each inline ref consists of one btrfs_extent_inline_ref that shows:
	 *    3.1) The ref type, one of the 4
	 *         TREE_BLOCK_REF	Tree block only
	 *         SHARED_BLOCK_REF	Tree block only
	 *         EXTENT_DATA_REF	Data only
	 *         SHARED_DATA_REF	Data only
	 *    3.2) Ref type specific data
	 *         Either using btrfs_extent_inline_ref::offset, or specific
	 *         data structure.
	 */
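	/*
	 * Illustrative (hypothetical) layout of a METADATA_ITEM with a single
	 * inline TREE_BLOCK_REF, assuming the skinny metadata feature:
	 *
	 *   key = (bytenr, METADATA_ITEM, level)
	 *   struct btrfs_extent_item	{ refs = 1, generation, flags = TREE_BLOCK }
	 *   struct btrfs_extent_inline_ref { type = TREE_BLOCK_REF, offset = <root objectid> }
	 */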
	if (unlikely(item_size < sizeof(*ei))) {
		extent_err(leaf, slot,
			   "invalid item size, have %u expect [%zu, %u)",
			   item_size, sizeof(*ei),
			   BTRFS_LEAF_DATA_SIZE(fs_info));
		return -EUCLEAN;
	}
	end = item_size + btrfs_item_ptr_offset(leaf, slot);

	/* Checks against extent_item */
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	total_refs = btrfs_extent_refs(leaf, ei);
	generation = btrfs_extent_generation(leaf, ei);
	if (unlikely(generation >
		     btrfs_super_generation(fs_info->super_copy) + 1)) {
		extent_err(leaf, slot,
			   "invalid generation, have %llu expect (0, %llu]",
			   generation,
			   btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (unlikely(!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
						  BTRFS_EXTENT_FLAG_TREE_BLOCK)))) {
		extent_err(leaf, slot,
		"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
			flags, BTRFS_EXTENT_FLAG_DATA |
			BTRFS_EXTENT_FLAG_TREE_BLOCK);
		return -EUCLEAN;
	}
	is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
	if (is_tree_block) {
		if (unlikely(key->type == BTRFS_EXTENT_ITEM_KEY &&
			     key->offset != fs_info->nodesize)) {
			extent_err(leaf, slot,
				   "invalid extent length, have %llu expect %u",
				   key->offset, fs_info->nodesize);
			return -EUCLEAN;
		}
	} else {
		if (unlikely(key->type != BTRFS_EXTENT_ITEM_KEY)) {
			extent_err(leaf, slot,
			"invalid key type, have %u expect %u for data backref",
				   key->type, BTRFS_EXTENT_ITEM_KEY);
			return -EUCLEAN;
		}
		if (unlikely(!IS_ALIGNED(key->offset, fs_info->sectorsize))) {
			extent_err(leaf, slot,
			"invalid extent length, have %llu expect aligned to %u",
				   key->offset, fs_info->sectorsize);
			return -EUCLEAN;
		}
		if (unlikely(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			extent_err(leaf, slot,
			"invalid extent flag, data has full backref set");
			return -EUCLEAN;
		}
	}
	ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1);

	/* Check the special case of btrfs_tree_block_info */
	if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		if (unlikely(btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL)) {
			extent_err(leaf, slot,
			"invalid tree block info level, have %u expect [0, %u]",
				   btrfs_tree_block_level(leaf, info),
				   BTRFS_MAX_LEVEL - 1);
			return -EUCLEAN;
		}
		ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1);
	}

	/* Check inline refs */
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		struct btrfs_extent_data_ref *dref;
		struct btrfs_shared_data_ref *sref;
		u64 dref_offset;
		u64 inline_offset;
		u8 inline_type;

		if (unlikely(ptr + sizeof(*iref) > end)) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
				   ptr, sizeof(*iref), end);
			return -EUCLEAN;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		inline_type = btrfs_extent_inline_ref_type(leaf, iref);
		inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
		if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
				   ptr, inline_type, end);
			return -EUCLEAN;
		}

		switch (inline_type) {
		/* inline_offset is subvolid of the owner, no need to check */
		case BTRFS_TREE_BLOCK_REF_KEY:
			inline_refs++;
			break;
		/* Contains parent bytenr */
		case BTRFS_SHARED_BLOCK_REF_KEY:
			if (unlikely(!IS_ALIGNED(inline_offset,
						 fs_info->sectorsize))) {
				extent_err(leaf, slot,
		"invalid tree parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs++;
			break;
		/*
		 * Contains owner subvolid, owner key objectid, adjusted offset.
		 * The only obvious corruption that can happen is in that offset.
		 */
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
			if (unlikely(!IS_ALIGNED(dref_offset,
						 fs_info->sectorsize))) {
				extent_err(leaf, slot,
		"invalid data ref offset, have %llu expect aligned to %u",
					   dref_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_extent_data_ref_count(leaf, dref);
			break;
		/* Contains parent bytenr and ref count */
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			if (unlikely(!IS_ALIGNED(inline_offset,
						 fs_info->sectorsize))) {
				extent_err(leaf, slot,
		"invalid data parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_shared_data_ref_count(leaf, sref);
			break;
		default:
			extent_err(leaf, slot, "unknown inline ref type: %u",
				   inline_type);
			return -EUCLEAN;
		}
		ptr += btrfs_extent_inline_ref_size(inline_type);
	}
	/* No padding is allowed */
	if (unlikely(ptr != end)) {
		extent_err(leaf, slot,
			   "invalid extent item size, padding bytes found");
		return -EUCLEAN;
	}

	/* Finally, check the inline refs against total refs */
	if (unlikely(inline_refs > total_refs)) {
		extent_err(leaf, slot,
			"invalid extent refs, have %llu expect >= inline %llu",
			   total_refs, inline_refs);
		return -EUCLEAN;
	}
	return 0;
}

static int check_simple_keyed_refs(struct extent_buffer *leaf,
				   struct btrfs_key *key, int slot)
{
	u32 expect_item_size = 0;

	if (key->type == BTRFS_SHARED_DATA_REF_KEY)
		expect_item_size = sizeof(struct btrfs_shared_data_ref);

	if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) {
		generic_err(leaf, slot,
		"invalid item size, have %u expect %u for key type %u",
			    btrfs_item_size(leaf, slot),
			    expect_item_size, key->type);
		return -EUCLEAN;
	}
	if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
		generic_err(leaf, slot,
"invalid key objectid for shared block ref, have %llu expect aligned to %u",
			    key->objectid, leaf->fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (unlikely(key->type != BTRFS_TREE_BLOCK_REF_KEY &&
		     !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize))) {
		extent_err(leaf, slot,
		"invalid tree parent bytenr, have %llu expect aligned to %u",
			   key->offset, leaf->fs_info->sectorsize);
		return -EUCLEAN;
	}
	return 0;
}

static int check_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_key *key, int slot)
{
	struct btrfs_extent_data_ref *dref;
	unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
	const unsigned long end = ptr + btrfs_item_size(leaf, slot);

	if (unlikely(btrfs_item_size(leaf, slot) % sizeof(*dref) != 0)) {
		generic_err(leaf, slot,
	"invalid item size, have %u expect aligned to %zu for key type %u",
			    btrfs_item_size(leaf, slot),
			    sizeof(*dref), key->type);
		return -EUCLEAN;
	}
	if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
		generic_err(leaf, slot,
"invalid key objectid for shared block ref, have %llu expect aligned to %u",
			    key->objectid, leaf->fs_info->sectorsize);
		return -EUCLEAN;
	}
	for (; ptr < end; ptr += sizeof(*dref)) {
		u64 offset;

		/*
		 * We cannot check the extent_data_ref hash due to possible
		 * overflow from the leaf due to hash collisions.
		 */
		dref = (struct btrfs_extent_data_ref *)ptr;
		offset = btrfs_extent_data_ref_offset(leaf, dref);
		if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
			extent_err(leaf, slot,
	"invalid extent data backref offset, have %llu expect aligned to %u",
				   offset, leaf->fs_info->sectorsize);
			return -EUCLEAN;
		}
	}
	return 0;
}

#define inode_ref_err(eb, slot, fmt, args...)			\
	inode_item_err(eb, slot, fmt, ##args)
static int check_inode_ref(struct extent_buffer *leaf,
			   struct btrfs_key *key, struct btrfs_key *prev_key,
			   int slot)
{
	struct btrfs_inode_ref *iref;
	unsigned long ptr;
	unsigned long end;

	if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
		return -EUCLEAN;
	/* namelen can't be 0, so item_size == sizeof() is also invalid */
	if (unlikely(btrfs_item_size(leaf, slot) <= sizeof(*iref))) {
		inode_ref_err(leaf, slot,
			"invalid item size, have %u expect (%zu, %u)",
			btrfs_item_size(leaf, slot),
			sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
		return -EUCLEAN;
	}

	ptr = btrfs_item_ptr_offset(leaf, slot);
	end = ptr + btrfs_item_size(leaf, slot);
	while (ptr < end) {
		u16 namelen;

		if (unlikely(ptr + sizeof(iref) > end)) {
			inode_ref_err(leaf, slot,
			"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
				ptr, end, sizeof(iref));
			return -EUCLEAN;
		}

		iref = (struct btrfs_inode_ref *)ptr;
		namelen = btrfs_inode_ref_name_len(leaf, iref);
		if (unlikely(ptr + sizeof(*iref) + namelen > end)) {
			inode_ref_err(leaf, slot,
				"inode ref overflow, ptr %lu end %lu namelen %u",
				ptr, end, namelen);
			return -EUCLEAN;
		}

		/*
		 * NOTE: In theory we should record all found index numbers
		 * to find any duplicated indexes, but that will be too time
		 * consuming for inodes with too many hard links.
		 */
		ptr += sizeof(*iref) + namelen;
	}
	return 0;
}

/*
 * Common point to switch the item-specific validation.
 */
static int check_leaf_item(struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot,
			   struct btrfs_key *prev_key)
{
	int ret = 0;
	struct btrfs_chunk *chunk;

	switch (key->type) {
	case BTRFS_EXTENT_DATA_KEY:
		ret = check_extent_data_item(leaf, key, slot, prev_key);
		break;
	case BTRFS_EXTENT_CSUM_KEY:
		ret = check_csum_item(leaf, key, slot, prev_key);
		break;
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		ret = check_dir_item(leaf, key, prev_key, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		ret = check_inode_ref(leaf, key, prev_key, slot);
		break;
	case BTRFS_BLOCK_GROUP_ITEM_KEY:
		ret = check_block_group_item(leaf, key, slot);
		break;
	case BTRFS_CHUNK_ITEM_KEY:
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		ret = check_leaf_chunk_item(leaf, chunk, key, slot);
		break;
	case BTRFS_DEV_ITEM_KEY:
1614
		ret = check_dev_item(leaf, key, slot);
Q
Qu Wenruo 已提交
1615
		break;
1616
	case BTRFS_INODE_ITEM_KEY:
1617
		ret = check_inode_item(leaf, key, slot);
1618
		break;
1619 1620 1621
	case BTRFS_ROOT_ITEM_KEY:
		ret = check_root_item(leaf, key, slot);
		break;
1622 1623 1624 1625
	case BTRFS_EXTENT_ITEM_KEY:
	case BTRFS_METADATA_ITEM_KEY:
		ret = check_extent_item(leaf, key, slot);
		break;
1626 1627 1628 1629 1630
	case BTRFS_TREE_BLOCK_REF_KEY:
	case BTRFS_SHARED_DATA_REF_KEY:
	case BTRFS_SHARED_BLOCK_REF_KEY:
		ret = check_simple_keyed_refs(leaf, key, slot);
		break;
1631 1632 1633
	case BTRFS_EXTENT_DATA_REF_KEY:
		ret = check_extent_data_ref(leaf, key, slot);
		break;
1634 1635 1636 1637
	}
	return ret;
}

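/*
 * Validate a single leaf: header level, the empty-leaf rules for the owning
 * tree, and for every item the key ordering and the offset/size geometry.
 * When @check_item_data is true the item contents are also run through
 * check_leaf_item().
 */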
static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	/* No valid key type is 0, so all keys should be larger than this key */
	struct btrfs_key prev_key = {0, 0, 0};
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (unlikely(btrfs_header_level(leaf) != 0)) {
		generic_err(leaf, 0,
			"invalid level for leaf, have %d expect 0",
			btrfs_header_level(leaf));
		return -EUCLEAN;
	}

	/*
	 * Extent buffers from a relocation tree have an owner field that
	 * corresponds to the subvolume tree they are based on. So just from an
	 * extent buffer alone we cannot find out the id of the corresponding
	 * subvolume tree, so we cannot figure out whether the extent buffer
	 * corresponds to the root of the relocation tree or not. So skip this
	 * check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		u64 owner = btrfs_header_owner(leaf);

		/* These trees must never be empty */
		if (unlikely(owner == BTRFS_ROOT_TREE_OBJECTID ||
			     owner == BTRFS_CHUNK_TREE_OBJECTID ||
			     owner == BTRFS_DEV_TREE_OBJECTID ||
			     owner == BTRFS_FS_TREE_OBJECTID ||
			     owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
			generic_err(leaf, 0,
			"invalid root, root %llu must never be empty",
				    owner);
			return -EUCLEAN;
		}

		/* Unknown tree */
		if (unlikely(owner == 0)) {
			generic_err(leaf, 0,
				"invalid owner, root 0 is not defined");
			return -EUCLEAN;
		}

		/* EXTENT_TREE_V2 can have empty extent trees. */
		if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
			return 0;

		if (unlikely(owner == BTRFS_EXTENT_TREE_OBJECTID)) {
			generic_err(leaf, 0,
			"invalid root, root %llu must never be empty",
				    owner);
			return -EUCLEAN;
		}

		return 0;
	}

	if (unlikely(nritems == 0))
		return 0;

	/*
	 * Check the following things to make sure this is a good leaf, and
	 * leaf users won't need to bother with similar sanity checks:
	 *
	 * 1) key ordering
	 * 2) item offset and size
	 *    No overlap, no hole, all inside the leaf.
	 * 3) item content
	 *    If possible, do comprehensive sanity check.
	 *    NOTE: All checks must only rely on the item data itself.
	 */
	for (slot = 0; slot < nritems; slot++) {
		u32 item_end_expected;
		u64 item_data_end;
		int ret;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* Make sure the keys are in the right order */
		if (unlikely(btrfs_comp_cpu_keys(&prev_key, &key) >= 0)) {
			generic_err(leaf, slot,
	"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
				prev_key.objectid, prev_key.type,
				prev_key.offset, key.objectid, key.type,
				key.offset);
			return -EUCLEAN;
		}

		item_data_end = (u64)btrfs_item_offset(leaf, slot) +
				btrfs_item_size(leaf, slot);
		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
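		/*
		 * For illustration, a leaf with two items is laid out as:
		 *
		 *   | header | item 0 | item 1 | free space | data 1 | data 0 |
		 *
		 * so item 0's data must end exactly at BTRFS_LEAF_DATA_SIZE()
		 * and item N's data must end where item N - 1's data begins.
		 */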
		if (slot == 0)
			item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
		else
			item_end_expected = btrfs_item_offset(leaf,
								 slot - 1);
		if (unlikely(item_data_end != item_end_expected)) {
			generic_err(leaf, slot,
				"unexpected item end, have %llu expect %u",
				item_data_end, item_end_expected);
			return -EUCLEAN;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other, but
		 * all point outside of the leaf.
		 */
		if (unlikely(item_data_end > BTRFS_LEAF_DATA_SIZE(fs_info))) {
			generic_err(leaf, slot,
			"slot end outside of leaf, have %llu expect range [0, %u]",
				item_data_end, BTRFS_LEAF_DATA_SIZE(fs_info));
			return -EUCLEAN;
		}

		/* Also check if the item pointer overlaps with the btrfs_item header. */
		if (unlikely(btrfs_item_ptr_offset(leaf, slot) <
			     btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item))) {
			generic_err(leaf, slot,
		"slot overlaps with its data, item end %lu data start %lu",
				btrfs_item_nr_offset(slot) +
				sizeof(struct btrfs_item),
				btrfs_item_ptr_offset(leaf, slot));
			return -EUCLEAN;
		}

		if (check_item_data) {
			/*
			 * Check if the item size and content meet other
			 * criteria
			 */
			ret = check_leaf_item(leaf, &key, slot, &prev_key);
			if (unlikely(ret < 0))
				return ret;
		}

		prev_key.objectid = key.objectid;
		prev_key.type = key.type;
		prev_key.offset = key.offset;
	}

	return 0;
}

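/*
 * Full check: leaf geometry (key order, item offsets and sizes) plus the
 * per-item-type content checks from check_leaf_item().
 */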
int btrfs_check_leaf_full(struct extent_buffer *leaf)
{
	return check_leaf(leaf, true);
}
ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO);

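/*
 * Relaxed check: only key ordering and item offset/size geometry, the item
 * contents themselves are not validated.
 */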
int btrfs_check_leaf_relaxed(struct extent_buffer *leaf)
{
	return check_leaf(leaf, false);
}

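/*
 * Validate a node (non-leaf) block: the level must be within
 * [1, BTRFS_MAX_LEVEL - 1], nritems within [1, BTRFS_NODEPTRS_PER_BLOCK()],
 * and every slot must have a non-zero, sector-aligned block pointer and a
 * key smaller than the key of the next slot.
 */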
int btrfs_check_node(struct extent_buffer *node)
{
	struct btrfs_fs_info *fs_info = node->fs_info;
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	int level = btrfs_header_level(node);
	u64 bytenr;
	int ret = 0;

	if (unlikely(level <= 0 || level >= BTRFS_MAX_LEVEL)) {
		generic_err(node, 0,
			"invalid level for node, have %d expect [1, %d]",
			level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
	if (unlikely(nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info))) {
		btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
			   btrfs_header_owner(node), node->start,
			   nr == 0 ? "small" : "large", nr,
			   BTRFS_NODEPTRS_PER_BLOCK(fs_info));
		return -EUCLEAN;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (unlikely(!bytenr)) {
			generic_err(node, slot,
				"invalid NULL node pointer");
			ret = -EUCLEAN;
			goto out;
		}
		if (unlikely(!IS_ALIGNED(bytenr, fs_info->sectorsize))) {
			generic_err(node, slot,
			"unaligned pointer, have %llu should be aligned to %u",
				bytenr, fs_info->sectorsize);
			ret = -EUCLEAN;
			goto out;
		}

		if (unlikely(btrfs_comp_cpu_keys(&key, &next_key) >= 0)) {
			generic_err(node, slot,
	"bad key order, current (%llu %u %llu) next (%llu %u %llu)",
				key.objectid, key.type, key.offset,
				next_key.objectid, next_key.type,
				next_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO);