// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following two values only influence the performance.
 *
 * The first one configures an upper limit for the number of (dynamically
 * allocated) sectors that are added to a bio. The second one configures the
 * number of parallel and outstanding I/O operations.
 */
#define SCRUB_SECTORS_PER_BIO	32	/* 128KiB per bio for 4KiB pages */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for 4KiB pages */

/*
 * The following value times 4KiB needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
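/* With BTRFS_MAX_METADATA_BLOCKSIZE == 64KiB this evaluates to 16 sectors. */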

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_io_context	*bioc;
	u64			map_length;
};

struct scrub_sector {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	u8			mirror_num;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
	struct scrub_sector	*sectors[SCRUB_SECTORS_PER_BIO];
	int			sector_count;
	int			next_free;
	struct work_struct	work;
};

struct scrub_block {
	struct scrub_sector	*sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
	int			sector_count;
	atomic_t		outstanding_sectors;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct work_struct	work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u32			stripe_len;

	refcount_t		refs;

	struct list_head	sectors_list;

	/* Work of parity check and repair */
	struct work_struct	work;

	/* Mark the parity blocks which have data */
	unsigned long		dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading or checking that data
	 */
	unsigned long		ebitmap;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			sectors_per_bio;

	/* State of IO submission throttling affecting the associated device */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	int			is_dev_replace;
	u64			write_pointer;

	struct scrub_bio        *wr_curr_bio;
	struct mutex            wr_lock;
	struct btrfs_device     *wr_tgtdev;
	bool                    flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t              refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
					     int sector_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_sector_get(struct scrub_sector *sector);
static void scrub_sector_put(struct scrub_sector *sector);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
			 u64 physical, struct btrfs_device *dev, u64 flags,
			 u64 gen, int mirror_num, u8 *csum,
			 u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct work_struct *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
				 u64 extent_logical, u32 extent_len,
				 u64 *extent_physical,
				 struct btrfs_device **extent_dev,
				 int *extent_mirror_num);
static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct work_struct *work);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
{
	return sector->recover &&
	       (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}
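
/*
 * Note: the two helpers above must stay paired. The reference taken in
 * scrub_pending_bio_inc() keeps the scrub context alive until after the
 * wake_up() in scrub_pending_bio_dec(), so waiters never see a freed
 * context.
 */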

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
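
/*
 * A note on the pause protocol above: scrub_pause_on() marks this scrub as
 * paused so that a pending pause request (e.g. from a transaction commit)
 * can make progress; scrub_pause_off() waits in __scrub_blocked_if_needed()
 * until the request is withdrawn before resuming.
 */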

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
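	/*
	 * Worked example with made-up numbers: for cache->start == 1MiB and
	 * full_stripe_len == 192KiB (three 64KiB data stripes), a bytenr of
	 * cache->start + 500KiB lies in the third full stripe and rounds
	 * down to cache->start + 384KiB.
	 */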
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * The caller must then call unlock_full_stripe() in the same context.
 *
 * Return <0 if we encounter an error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context as the corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock the full stripe without problem.
 * Return <0 for error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
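
/*
 * Typical usage of the pair above, as in scrub_handle_errored_block():
 *
 *	bool locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck and repair sectors in the full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 */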

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->sector_count; i++) {
			WARN_ON(!sbio->sectors[i]->page);
			scrub_block_put(sbio->sectors[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->sector_count = 0;
		INIT_WORK(&sbio->work, scrub_bio_end_io_worker);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	sctx->throttle_deadline = 0;

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
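
/*
 * Note on the allocation scheme above: the bios array doubles as a free
 * list, with first_free indexing the first unused scrub_bio and next_free
 * linking the remaining ones (-1 terminates the chain).
 */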

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL.
	 * Scrub uses GFP_NOFS in this context, so we keep it consistent but
	 * it does not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->sector_count < 1);
	dev = sblock->sectors[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->sectors[0]->physical;
	swarn.logical = sblock->sectors[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bioc(recover->bioc);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all sectors in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int sector_num;
	int success;
	bool full_stripe_locked;
	unsigned int nofs_flag;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->sector_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->sectors[0]->logical;
	BUG_ON(sblock_to_check->sectors[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->sectors[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->sectors[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->sectors[0]->have_csum;
	dev = sblock_to_check->sectors[0]->dev;

	if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
		return 0;

	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait
	 * for all the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_sector_to_wr_bio(), which happen down the call chain
	 * of this function.
	 */
	nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, a race can happen with the scrub thread of a different
	 * device. For data corruption, the parity and data threads will both
	 * try to recover the data.
	 * The race can lead to a doubly added csum error, or even to an
	 * unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		memalloc_nofs_restore(nofs_flag);
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * Read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * that this fixup code is called) another time, sector by sector
	 * this time in order to know which sectors caused I/O errors and
	 * which ones are good (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the sectors from those
	 * mirrors without I/O error on the particular sectors.
	 * One example (with blocks >= 2 * sectorsize) would be that
	 * mirror #1 has an I/O error on the first sector, the second
	 * sector is good, and mirror #2 has an I/O error on the second
	 * sector, but the first sector is good.
	 * Then the first sector of the first mirror can be repaired by
	 * taking the first sector of the second mirror, and the second
	 * sector of the second mirror can be repaired by copying the
	 * contents of the 2nd sector of the 1st mirror.
	 * One more note: if the sectors of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get the
	 * best data for repairing, the first attempt is to find a mirror
	 * without I/O errors and with a validated checksum. Only if this
	 * is not possible, the sectors are picked from mirrors with I/O
	 * errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain the
	 * statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* Setup the context, map the logical blocks and alloc the sectors */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading sector by sector, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * Now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those sectors are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other sectors is better (and it
	 * could happen otherwise that a correct sector would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ;mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].sector_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->sectors[0]->recover;
			int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].sector_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->sectors[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those sectors.
	 * Select the good sectors from mirrors to rewrite bad sectors from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of sectors from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd sector of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on sectorsize. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same sectorsize
	 * area are unreadable.
	 */
	success = 1;
	for (sector_num = 0; sector_num < sblock_bad->sector_count;
	     sector_num++) {
		struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
		struct scrub_block *sblock_other = NULL;

		/* Skip no-io-error sectors in scrub */
		if (!sector_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (sector_bad->io_error) {
			/* Try to find no-io-error sector in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].sector_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    sectors[sector_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the sector from.
			 * scrub_write_sector_to_dev_replace() handles this
			 * case (sector->io_error), by filling the block with
			 * zeros before submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_sector_to_dev_replace(sblock_other,
							      sector_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_sector_from_good_copy(sblock_bad,
								 sblock_other,
								 sector_num, 0);
			if (0 == ret)
				sector_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int i;

			for (i = 0; i < sblock->sector_count; i++) {
				sblock->sectors[i]->sblock = NULL;
				recover = sblock->sectors[i]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->sectors[i]->recover = NULL;
				}
				scrub_sector_put(sblock->sectors[i]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		return ret;
	return 0;
}
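
/*
 * For RAID5/6, scrub_nr_raid_mirrors() below treats the reconstructions
 * possible from parity as extra "mirrors": one for RAID5, two for RAID6,
 * in addition to the data as it sits on disk. For all other profiles each
 * stripe of the io context is a full copy.
 */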

static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
{
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bioc->num_stripes;
}

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
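
/*
 * Example with made-up numbers for the RAID56 branch above: given
 * raid_map == { 1M, 1M + 64K, RAID5_P_STRIPE } and mapped_length == 64K,
 * a logical of 1M + 80K yields *stripe_index == 1 and *stripe_offset == 16K.
 * P/Q stripes are skipped since they have no logical address of their own.
 */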

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
	u64 logical = original_sblock->sectors[0]->logical;
	u64 generation = original_sblock->sectors[0]->generation;
	u64 flags = original_sblock->sectors[0]->flags;
	u64 have_csum = original_sblock->sectors[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_io_context *bioc;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int sector_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * Note: the two members refs and outstanding_sectors are not used (and
	 * not set) in the blocks that are used for the recheck procedure.
	 */

	while (length > 0) {
		sublen = min_t(u64, length, fs_info->sectorsize);
		mapped_length = sublen;
		bioc = NULL;

		/*
		 * With a length of sectorsize, each returned stripe represents
		 * one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				       logical, &mapped_length, &bioc);
		if (ret || !bioc || mapped_length < sublen) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bioc = bioc;
		recover->map_length = mapped_length;

		ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_sector *sector;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			sector = kzalloc(sizeof(*sector), GFP_NOFS);
			if (!sector) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_sector_get(sector);
			sblock->sectors[sector_index] = sector;
			sector->sblock = sblock;
			sector->flags = flags;
			sector->generation = generation;
			sector->logical = logical;
			sector->have_csum = have_csum;
			if (have_csum)
				memcpy(sector->csum,
				       original_sblock->sectors[0]->csum,
				       sctx->fs_info->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bioc->map_type,
						      bioc->raid_map,
						      mapped_length,
						      bioc->num_stripes -
						      bioc->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			sector->physical = bioc->stripes[stripe_index].physical +
					 stripe_offset;
			sector->dev = bioc->stripes[stripe_index].dev;

			BUG_ON(sector_index >= original_sblock->sector_count);
			sector->physical_for_dev_replace =
				original_sblock->sectors[sector_index]->
				physical_for_dev_replace;
			/* For missing devices, dev->bdev is NULL */
			sector->mirror_num = mirror_index + 1;
			sblock->sector_count++;
			sector->page = alloc_page(GFP_NOFS);
			if (!sector->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			sector->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		sector_index++;
	}

	return 0;
}
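
/*
 * On success, sblocks_for_recheck[m] above is mirror m's view of the block:
 * sector i of every mirror covers the same logical sector i, each with its
 * own physical location and device.
 */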

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_sector *sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	int mirror_num;

	bio->bi_iter.bi_sector = sector->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	mirror_num = sector->sblock->sectors[0]->mirror_num;
	ret = raid56_parity_recover(bio, sector->recover->bioc,
				    sector->recover->map_length,
				    mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}

static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_sector *first_sector = sblock->sectors[0];
	struct bio *bio;
	int i;

	/* All sectors in sblock belong to the same stripe on the same device. */
	ASSERT(first_sector->dev);
	if (!first_sector->dev->bdev)
		goto out;

	bio = bio_alloc(first_sector->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];

		WARN_ON(!sector->page);
		bio_add_page(bio, sector->page, PAGE_SIZE, 0);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (i = 0; i < sblock->sector_count; i++)
		sblock->sectors[i]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * This function will check the on disk data for checksum errors, header errors
 * and read I/O errors. If any I/O errors happen, the exact sectors which are
 * errored are marked as being bad. The goal is to enable scrub to take those
 * sectors that are not errored from all the mirrors so that the sectors that
 * are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int i;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];
		struct bio bio;
		struct bio_vec bvec;

		if (sector->dev->bdev == NULL) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!sector->page);
		bio_init(&bio, sector->dev->bdev, &bvec, 1, REQ_OP_READ);
		bio_add_page(&bio, sector->page, fs_info->sectorsize, 0);
		bio.bi_iter.bi_sector = sector->physical >> 9;

		btrfsic_check_bio(&bio);
		if (submit_bio_wait(&bio)) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_uninit(&bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
{
	struct btrfs_fs_devices *fs_devices = sector->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int i;
	int ret = 0;

	for (i = 0; i < sblock_bad->sector_count; i++) {
		int ret_sub;

		ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
							     sblock_good, i, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write)
{
	struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
	struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
	const u32 sectorsize = fs_info->sectorsize;

	BUG_ON(sector_bad->page == NULL);
	BUG_ON(sector_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || sector_bad->io_error) {
		struct bio bio;
		struct bio_vec bvec;
		int ret;

		if (!sector_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio_init(&bio, sector_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
		bio.bi_iter.bi_sector = sector_bad->physical >> 9;
		__bio_add_page(&bio, sector_good->page, sectorsize, 0);

		btrfsic_check_bio(&bio);
		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);

		if (ret) {
			btrfs_dev_stat_inc_and_print(sector_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			return -EIO;
		}
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int i;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (i = 0; i < sblock->sector_count; i++) {
		int ret;

		ret = scrub_write_sector_to_dev_replace(sblock, i);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}

static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
{
	struct scrub_sector *sector = sblock->sectors[sector_num];

	BUG_ON(sector->page == NULL);
	if (sector->io_error)
		clear_page(page_address(sector->page));

	return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
}
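
/*
 * Zoned support: writes to a zoned dev-replace target must stay sequential
 * within a zone, so fill_writer_pointer_gap() below zero-fills the range
 * between the current write pointer and the next physical position that
 * scrub wants to write to.
 */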

static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
	int ret = 0;
	u64 length;

	if (!btrfs_is_zoned(sctx->fs_info))
		return 0;

	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
		return 0;

	if (sctx->write_pointer < physical) {
		length = physical - sctx->write_pointer;

		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
						sctx->write_pointer, length);
		if (!ret)
			sctx->write_pointer = physical;
	}
	return ret;
}
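
/*
 * scrub_add_sector_to_wr_bio() below batches sectors into the per-context
 * write bio as long as they stay physically and logically contiguous; a
 * discontinuity or a full bio triggers scrub_wr_submit() and a retry.
 */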

static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_bio *sbio;
	int ret;
	const u32 sectorsize = sctx->fs_info->sectorsize;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->sector_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->sector_count == 0) {
		ret = fill_writer_pointer_gap(sctx, sector->physical_for_dev_replace);
		if (ret) {
			mutex_unlock(&sctx->wr_lock);
			return ret;
		}

		sbio->physical = sector->physical_for_dev_replace;
		sbio->logical = sector->logical;
		sbio->dev = sctx->wr_tgtdev;
		if (!sbio->bio) {
			sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
					      REQ_OP_WRITE, GFP_NOFS);
		}
		sbio->bio->bi_private = sbio;
		sbio->bio->bi_end_io = scrub_wr_bio_end_io;
		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->status = 0;
	} else if (sbio->physical + sbio->sector_count * sectorsize !=
		   sector->physical_for_dev_replace ||
		   sbio->logical + sbio->sector_count * sectorsize !=
		   sector->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
	if (ret != sectorsize) {
		if (sbio->sector_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->sectors[sbio->sector_count] = sector;
	scrub_sector_get(sector);
	sbio->sector_count++;
	if (sbio->sector_count == sctx->sectors_per_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	scrub_pending_bio_inc(sctx);
	/*
	 * Process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver, which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5.
	 */
	btrfsic_check_bio(sbio->bio);
	submit_bio(sbio->bio);

	if (btrfs_is_zoned(sctx->fs_info))
		sctx->write_pointer = sbio->physical + sbio->sector_count *
			sctx->fs_info->sectorsize;
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker);
	queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

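/*
 * Completion work for a dev-replace write bio: on error, flag every sector
 * in the bio and bump the dev-replace write error counter, then drop the
 * sector references, free the bio and the scrub_bio, and decrement the
 * pending bio count.
 */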
static void scrub_wr_bio_end_io_worker(struct work_struct *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
	if (sbio->status) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->sector_count; i++) {
			struct scrub_sector *sector = sbio->sectors[i];

			sector->io_error = 1;
			atomic64_inc(&dev_replace->num_write_errors);
		}
	}

	for (i = 0; i < sbio->sector_count; i++)
		scrub_sector_put(sbio->sectors[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently, because this function
	 * only uses the return value and never looks at the stats themselves.
	 *
	 * TODO: always use the stats.
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->sector_count < 1);
	flags = sblock->sectors[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 csum[BTRFS_CSUM_SIZE];
	struct scrub_sector *sector;
	char *kaddr;

	BUG_ON(sblock->sector_count < 1);
	sector = sblock->sectors[0];
	if (!sector->have_csum)
		return 0;

	kaddr = page_address(sector->page);

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);

	/*
	 * In scrub_sectors() and scrub_sectors_for_parity() we ensure each sector
	 * only contains one sector of data.
	 */
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);

	if (memcmp(csum, sector->csum, fs_info->csum_size))
		sblock->checksum_error = 1;
	return sblock->checksum_error;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	/*
	 * This is done in sectorsize steps even for metadata as there's a
	 * constraint for nodesize to be aligned to sectorsize. This will need
	 * to change so we don't misuse data and metadata units like that.
	 */
	const u32 sectorsize = sctx->fs_info->sectorsize;
	const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
	int i;
	struct scrub_sector *sector;
	char *kaddr;

	BUG_ON(sblock->sector_count < 1);

	/* Each member in sectors is just one sector */
	ASSERT(sblock->sector_count == num_sectors);

	sector = sblock->sectors[0];
	kaddr = page_address(sector->page);
	h = (struct btrfs_header *)kaddr;
	memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sector->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sector->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sector))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    sectorsize - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_sectors; i++) {
		kaddr = page_address(sblock->sectors[i]->page);
		crypto_shash_update(shash, kaddr, sectorsize);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct scrub_sector *sector;
	char *kaddr;
	int fail_gen = 0;
	int fail_cor = 0;

	BUG_ON(sblock->sector_count < 1);
	sector = sblock->sectors[0];
	kaddr = page_address(sector->page);
	s = (struct btrfs_super_block *)kaddr;

	if (sector->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sector->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sector))
		++fail_cor;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
			BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);

	if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * If we find an error in a super block, we just report it.
		 * It will get written out with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sector->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sector->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->sector_count; i++)
			scrub_sector_put(sblock->sectors[i]);
		kfree(sblock);
	}
}

static void scrub_sector_get(struct scrub_sector *sector)
{
	atomic_inc(&sector->refs);
}

static void scrub_sector_put(struct scrub_sector *sector)
{
	if (atomic_dec_and_test(&sector->refs)) {
		if (sector->page)
			__free_page(sector->page);
		kfree(sector);
	}
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second.  Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
 */
static void scrub_throttle(struct scrub_ctx *sctx)
{
	const int time_slice = 1000;
	struct scrub_bio *sbio;
	struct btrfs_device *device;
	s64 delta;
	ktime_t now;
	u32 div;
	u64 bwlimit;

	sbio = sctx->bios[sctx->curr];
	device = sbio->dev;
	bwlimit = READ_ONCE(device->scrub_speed_max);
	if (bwlimit == 0)
		return;

	/*
	 * Slice is divided into intervals when the IO is submitted, adjust by
	 * bwlimit and maximum of 64 intervals.
	 */
	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
	div = min_t(u32, 64, div);

	/* Start new epoch, set deadline */
	now = ktime_get();
	if (sctx->throttle_deadline == 0) {
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	/* Still in the time to send? */
	if (ktime_before(now, sctx->throttle_deadline)) {
		/* If current bio is within the limit, send it */
		sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;

		/* We're over the limit, sleep until the rest of the slice */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		/* New request after deadline, start new epoch */
		delta = 0;
	}

	if (delta) {
		long timeout;

		timeout = div_u64(delta * HZ, 1000);
		schedule_timeout_interruptible(timeout);
	}

	/* Next call will start the deadline period */
	sctx->throttle_deadline = 0;
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	scrub_throttle(sctx);

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_check_bio(sbio->bio);
	submit_bio(sbio->bio);
}

static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_block *sblock = sector->sblock;
	struct scrub_bio *sbio;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->sector_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->sector_count == 0) {
		sbio->physical = sector->physical;
		sbio->logical = sector->logical;
		sbio->dev = sector->dev;
		if (!sbio->bio) {
			sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
					      REQ_OP_READ, GFP_NOFS);
		}
		sbio->bio->bi_private = sbio;
		sbio->bio->bi_end_io = scrub_bio_end_io;
		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->status = 0;
	} else if (sbio->physical + sbio->sector_count * sectorsize !=
		   sector->physical ||
		   sbio->logical + sbio->sector_count * sectorsize !=
		   sector->logical ||
		   sbio->dev != sector->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->sectors[sbio->sector_count] = sector;
	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
	if (ret != sectorsize) {
		if (sbio->sector_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_sectors);
	sbio->sector_count++;
	if (sbio->sector_count == sctx->sectors_per_bio)
		scrub_submit(sctx);

	return 0;
}

2109
static void scrub_missing_raid56_end_io(struct bio *bio)
2110 2111
{
	struct scrub_block *sblock = bio->bi_private;
2112
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2113

2114
	if (bio->bi_status)
2115 2116
		sblock->no_io_error_seen = 0;

2117 2118
	bio_put(bio);

2119
	queue_work(fs_info->scrub_workers, &sblock->work);
2120 2121
}

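/*
 * A rebuilt block from a missing device has completed: verify its
 * checksums and, if everything is intact, forward it to the dev-replace
 * target; otherwise account the error against the scrub statistics.
 */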
static void scrub_missing_raid56_worker(struct work_struct *work)
2123 2124 2125
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
2126
	struct btrfs_fs_info *fs_info = sctx->fs_info;
2127 2128 2129
	u64 logical;
	struct btrfs_device *dev;

2130 2131
	logical = sblock->sectors[0]->logical;
	dev = sblock->sectors[0]->dev;
2132

2133
	if (sblock->no_io_error_seen)
2134
		scrub_recheck_block_checksum(sblock);
2135 2136 2137 2138 2139

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
2140
		btrfs_err_rl_in_rcu(fs_info,
2141
			"IO error rebuilding logical %llu for dev %s",
2142 2143 2144 2145 2146
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
2147
		btrfs_err_rl_in_rcu(fs_info,
2148
			"failed to rebuild valid logical %llu for dev %s",
2149 2150 2151 2152 2153
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

2154
	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2155
		mutex_lock(&sctx->wr_lock);
2156
		scrub_wr_submit(sctx);
2157
		mutex_unlock(&sctx->wr_lock);
2158 2159
	}

2160
	scrub_block_put(sblock);
2161 2162 2163 2164 2165 2166
	scrub_pending_bio_dec(sctx);
}

static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
2167
	struct btrfs_fs_info *fs_info = sctx->fs_info;
2168 2169
	u64 length = sblock->sector_count << fs_info->sectorsize_bits;
	u64 logical = sblock->sectors[0]->logical;
2170
	struct btrfs_io_context *bioc = NULL;
2171 2172 2173 2174 2175
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

2176
	btrfs_bio_counter_inc_blocked(fs_info);
2177
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2178 2179 2180
			       &length, &bioc);
	if (ret || !bioc || !bioc->raid_map)
		goto bioc_out;
2181 2182

	if (WARN_ON(!sctx->is_dev_replace ||
2183
		    !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2184 2185 2186 2187
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
2188
		 * there's a bug in scrub_find_good_copy()/btrfs_map_block().
2189
		 */
2190
		goto bioc_out;
2191 2192
	}

2193
	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2194 2195 2196 2197
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

2198
	rbio = raid56_alloc_missing_rbio(bio, bioc, length);
2199 2200 2201
	if (!rbio)
		goto rbio_out;

2202
	for (i = 0; i < sblock->sector_count; i++) {
2203
		struct scrub_sector *sector = sblock->sectors[i];
2204

2205 2206 2207 2208 2209
		/*
		 * For now, our scrub is still one page per sector, so pgoff
		 * is always 0.
		 */
		raid56_add_scrub_pages(rbio, sector->page, 0, sector->logical);
2210 2211
	}

2212
	INIT_WORK(&sblock->work, scrub_missing_raid56_worker);
2213 2214 2215 2216 2217 2218 2219
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
2220
bioc_out:
2221
	btrfs_bio_counter_dec(fs_info);
2222
	btrfs_put_bioc(bioc);
2223 2224 2225 2226 2227
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

2228
static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
2229
		       u64 physical, struct btrfs_device *dev, u64 flags,
2230
		       u64 gen, int mirror_num, u8 *csum,
2231
		       u64 physical_for_dev_replace)
2232 2233
{
	struct scrub_block *sblock;
2234
	const u32 sectorsize = sctx->fs_info->sectorsize;
2235 2236
	int index;

2237
	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2238
	if (!sblock) {
2239 2240 2241
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
2242
		return -ENOMEM;
A
Arne Jansen 已提交
2243
	}
2244

2245 2246
	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
2247
	refcount_set(&sblock->refs, 1);
2248
	sblock->sctx = sctx;
2249 2250 2251
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
2252
		struct scrub_sector *sector;
2253 2254 2255 2256 2257 2258
		/*
		 * Here we will allocate one page for one sector to scrub.
		 * This is fine if PAGE_SIZE == sectorsize, but will cost
		 * more memory for PAGE_SIZE > sectorsize case.
		 */
		u32 l = min(sectorsize, len);
2259

2260 2261
		sector = kzalloc(sizeof(*sector), GFP_KERNEL);
		if (!sector) {
2262
leave_nomem:
2263 2264 2265
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
2266
			scrub_block_put(sblock);
2267 2268
			return -ENOMEM;
		}
2269
		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
2270 2271 2272 2273 2274 2275 2276 2277 2278 2279
		scrub_sector_get(sector);
		sblock->sectors[index] = sector;
		sector->sblock = sblock;
		sector->dev = dev;
		sector->flags = flags;
		sector->generation = gen;
		sector->logical = logical;
		sector->physical = physical;
		sector->physical_for_dev_replace = physical_for_dev_replace;
		sector->mirror_num = mirror_num;
2280
		if (csum) {
2281 2282
			sector->have_csum = 1;
			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2283
		} else {
2284
			sector->have_csum = 0;
2285
		}
2286
		sblock->sector_count++;
2287 2288
		sector->page = alloc_page(GFP_KERNEL);
		if (!sector->page)
2289
			goto leave_nomem;
2290 2291 2292
		len -= l;
		logical += l;
		physical += l;
2293
		physical_for_dev_replace += l;
2294 2295
	}

2296
	WARN_ON(sblock->sector_count == 0);
2297
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2298 2299 2300 2301 2302 2303
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
2304
		for (index = 0; index < sblock->sector_count; index++) {
2305
			struct scrub_sector *sector = sblock->sectors[index];
2306
			int ret;
2307

2308
			ret = scrub_add_sector_to_rd_bio(sctx, sector);
2309 2310 2311 2312
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
2313
		}
A
Arne Jansen 已提交
2314

2315
		if (flags & BTRFS_EXTENT_FLAG_SUPER)
2316 2317
			scrub_submit(sctx);
	}
A
Arne Jansen 已提交
2318

2319 2320
	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
A
Arne Jansen 已提交
2321 2322 2323
	return 0;
}

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	queue_work(fs_info->scrub_workers, &sbio->work);
}

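/*
 * Completion work for a scrub read bio: flag I/O errors on the affected
 * sectors, complete every scrub_block whose last outstanding sector just
 * finished, and put the scrub_bio slot back on the free list.
 */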
static void scrub_bio_end_io_worker(struct work_struct *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->sector_count; i++) {
			struct scrub_sector *sector = sbio->sectors[i];

			sector->io_error = 1;
			sector->sblock->no_io_error_seen = 0;
		}
	}

	/* Now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->sector_count; i++) {
		struct scrub_sector *sector = sbio->sectors[i];
		struct scrub_block *sblock = sector->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_sectors))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

2377 2378
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
2379
				       u64 start, u32 len)
2380
{
2381
	u64 offset;
2382
	u32 nsectors;
2383
	u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
2384 2385 2386 2387 2388 2389 2390

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
2391
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
2392
	offset = offset >> sectorsize_bits;
2393
	nsectors = len >> sectorsize_bits;
2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2405
						   u64 start, u32 len)
2406
{
2407
	__scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len);
2408 2409 2410
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2411
						  u64 start, u32 len)
2412
{
2413
	__scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len);
2414 2415
}

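/*
 * Called when the last outstanding sector of a scrub_block has finished:
 * blocks with I/O errors go straight to the error handler, otherwise the
 * checksums are verified and, during dev-replace, intact blocks are
 * written out to the target device.
 */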
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If the block has a checksum error, it is written to the
		 * dev-replace target via the repair mechanism; otherwise,
		 * in the dev-replace case, it is written out here directly.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->sectors[0]->logical;
		u64 end = sblock->sectors[sblock->sector_count - 1]->logical +
			  sblock->sctx->fs_info->sectorsize;

		ASSERT(end - start <= U32_MAX);
		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

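/* Drop one csum range from sctx->csum_list and account the discarded sectors. */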
static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
{
	sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
	list_del(&sum->list);
	kfree(sum);
}

/*
 * Find the desired csum for range [logical, logical + sectorsize), and store
 * the csum into @csum.
 *
 * The search source is sctx->csum_list, which is a pre-populated list
 * storing bytenr ordered csum ranges.  We're responsible to cleanup any range
 * that is before @logical.
 *
 * Return 0 if there is no csum for the range.
 * Return 1 if there is csum for the range and copied to @csum.
 */
2463
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
A
Arne Jansen 已提交
2464
{
2465
	bool found = false;
A
Arne Jansen 已提交
2466

2467
	while (!list_empty(&sctx->csum_list)) {
2468 2469 2470 2471
		struct btrfs_ordered_sum *sum = NULL;
		unsigned long index;
		unsigned long num_sectors;

2472
		sum = list_first_entry(&sctx->csum_list,
A
Arne Jansen 已提交
2473
				       struct btrfs_ordered_sum, list);
2474
		/* The current csum range is beyond our range, no csum found */
A
Arne Jansen 已提交
2475 2476 2477
		if (sum->bytenr > logical)
			break;

2478 2479 2480 2481 2482 2483 2484 2485 2486 2487
		/*
		 * The current sum is before our bytenr, since scrub is always
		 * done in bytenr order, the csum will never be used anymore,
		 * clean it up so that later calls won't bother with the range,
		 * and continue search the next range.
		 */
		if (sum->bytenr + sum->len <= logical) {
			drop_csum_range(sctx, sum);
			continue;
		}
A
Arne Jansen 已提交
2488

2489 2490 2491 2492
		/* Now the csum range covers our bytenr, copy the csum */
		found = true;
		index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
		num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
2493

2494 2495 2496 2497 2498 2499 2500
		memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
		       sctx->fs_info->csum_size);

		/* Cleanup the range if we're at the end of the csum range */
		if (index == num_sectors - 1)
			drop_csum_range(sctx, sum);
		break;
A
Arne Jansen 已提交
2501
	}
2502 2503
	if (!found)
		return 0;
2504
	return 1;
A
Arne Jansen 已提交
2505 2506 2507
}

/* scrub extent tries to collect up to 64 kB for each bio */
L
Liu Bo 已提交
2508
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2509
			u64 logical, u32 len,
2510
			u64 physical, struct btrfs_device *dev, u64 flags,
2511
			u64 gen, int mirror_num)
A
Arne Jansen 已提交
2512
{
2513 2514 2515
	struct btrfs_device *src_dev = dev;
	u64 src_physical = physical;
	int src_mirror = mirror_num;
A
Arne Jansen 已提交
2516 2517
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
2518 2519 2520
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
L
Liu Bo 已提交
2521 2522 2523 2524
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
2525 2526 2527 2528
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
2529
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
L
Liu Bo 已提交
2530 2531 2532 2533
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
2534 2535 2536 2537
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
2538
	} else {
2539
		blocksize = sctx->fs_info->sectorsize;
2540
		WARN_ON(1);
2541
	}
A
Arne Jansen 已提交
2542

2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554
	/*
	 * For dev-replace case, we can have @dev being a missing device.
	 * Regular scrub will avoid its execution on missing device at all,
	 * as that would trigger tons of read error.
	 *
	 * Reading from missing device will cause read error counts to
	 * increase unnecessarily.
	 * So here we change the read source to a good mirror.
	 */
	if (sctx->is_dev_replace && !dev->bdev)
		scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical,
				     &src_dev, &src_mirror);
A
Arne Jansen 已提交
2555
	while (len) {
2556
		u32 l = min(len, blocksize);
A
Arne Jansen 已提交
2557 2558 2559 2560
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
2561
			have_csum = scrub_find_csum(sctx, logical, csum);
A
Arne Jansen 已提交
2562
			if (have_csum == 0)
2563
				++sctx->stat.no_csum;
A
Arne Jansen 已提交
2564
		}
2565 2566 2567
		ret = scrub_sectors(sctx, logical, l, src_physical, src_dev,
				    flags, gen, src_mirror,
				    have_csum ? csum : NULL, physical);
A
Arne Jansen 已提交
2568 2569 2570 2571 2572
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
2573
		src_physical += l;
A
Arne Jansen 已提交
2574 2575 2576 2577
	}
	return 0;
}

2578
static int scrub_sectors_for_parity(struct scrub_parity *sparity,
2579
				  u64 logical, u32 len,
2580 2581 2582 2583 2584
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
2585
	const u32 sectorsize = sctx->fs_info->sectorsize;
2586 2587
	int index;

2588 2589
	ASSERT(IS_ALIGNED(len, sectorsize));

2590
	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2591 2592 2593 2594 2595 2596 2597 2598 2599
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
2600
	refcount_set(&sblock->refs, 1);
2601 2602 2603 2604 2605 2606
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
2607
		struct scrub_sector *sector;
2608

2609 2610
		sector = kzalloc(sizeof(*sector), GFP_KERNEL);
		if (!sector) {
2611 2612 2613 2614 2615 2616 2617
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
2618
		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
2619
		/* For scrub block */
2620 2621
		scrub_sector_get(sector);
		sblock->sectors[index] = sector;
2622
		/* For scrub parity */
2623 2624 2625 2626 2627 2628 2629 2630 2631
		scrub_sector_get(sector);
		list_add_tail(&sector->list, &sparity->sectors_list);
		sector->sblock = sblock;
		sector->dev = dev;
		sector->flags = flags;
		sector->generation = gen;
		sector->logical = logical;
		sector->physical = physical;
		sector->mirror_num = mirror_num;
2632
		if (csum) {
2633 2634
			sector->have_csum = 1;
			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2635
		} else {
2636
			sector->have_csum = 0;
2637
		}
2638
		sblock->sector_count++;
2639 2640
		sector->page = alloc_page(GFP_KERNEL);
		if (!sector->page)
2641
			goto leave_nomem;
2642 2643 2644 2645 2646 2647


		/* Iterate over the stripe range in sectorsize steps */
		len -= sectorsize;
		logical += sectorsize;
		physical += sectorsize;
2648 2649
	}

2650 2651
	WARN_ON(sblock->sector_count == 0);
	for (index = 0; index < sblock->sector_count; index++) {
2652
		struct scrub_sector *sector = sblock->sectors[index];
2653 2654
		int ret;

2655
		ret = scrub_add_sector_to_rd_bio(sctx, sector);
2656 2657 2658 2659 2660 2661
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

2662
	/* Last one frees, either here or in bio completion for last sector */
2663 2664 2665 2666 2667
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
2668
				   u64 logical, u32 len,
2669 2670 2671 2672 2673 2674 2675 2676
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

2677
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2678 2679 2680 2681
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

2682
	if (flags & BTRFS_EXTENT_FLAG_DATA) {
L
Liu Bo 已提交
2683
		blocksize = sparity->stripe_len;
2684
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
L
Liu Bo 已提交
2685
		blocksize = sparity->stripe_len;
2686
	} else {
2687
		blocksize = sctx->fs_info->sectorsize;
2688 2689 2690 2691
		WARN_ON(1);
	}

	while (len) {
2692
		u32 l = min(len, blocksize);
2693 2694 2695 2696
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
2697
			have_csum = scrub_find_csum(sctx, logical, csum);
2698 2699 2700
			if (have_csum == 0)
				goto skip;
		}
2701
		ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
2702 2703 2704 2705
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
2706
skip:
2707 2708 2709 2710 2711 2712 2713
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * Return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
2722 2723
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
2724 2725 2726 2727 2728
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
2729 2730
	u32 stripe_index;
	u32 rot;
2731
	const int data_stripes = nr_data_stripes(map);
2732

2733
	last_offset = (physical - map->stripes[num].physical) * data_stripes;
2734 2735 2736
	if (stripe_start)
		*stripe_start = last_offset;

2737
	*offset = last_offset;
2738
	for (i = 0; i < data_stripes; i++) {
2739 2740
		*offset = last_offset + i * map->stripe_len;

2741
		stripe_nr = div64_u64(*offset, map->stripe_len);
2742
		stripe_nr = div_u64(stripe_nr, data_stripes);
2743 2744

		/* Work out the disk rotation on this stripe-set */
2745
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2746 2747
		/* calculate which stripe this data locates */
		rot += i;
2748
		stripe_index = rot % map->num_stripes;
2749 2750 2751 2752 2753 2754 2755 2756 2757
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

2758 2759 2760
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
2761
	struct scrub_sector *curr, *next;
2762 2763
	int nbits;

2764
	nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors);
2765 2766 2767 2768 2769 2770 2771
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

2772
	list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) {
2773
		list_del_init(&curr->list);
2774
		scrub_sector_put(curr);
2775 2776 2777 2778 2779
	}

	kfree(sparity);
}

2780
static void scrub_parity_bio_endio_worker(struct work_struct *work)
2781 2782 2783 2784 2785 2786 2787 2788 2789
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

2790
static void scrub_parity_bio_endio(struct bio *bio)
2791
{
Y
Yu Zhe 已提交
2792
	struct scrub_parity *sparity = bio->bi_private;
2793
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2794

2795
	if (bio->bi_status)
2796 2797
		bitmap_or(&sparity->ebitmap, &sparity->ebitmap,
			  &sparity->dbitmap, sparity->nsectors);
2798 2799

	bio_put(bio);
2800

2801 2802
	INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker);
	queue_work(fs_info->scrub_parity_workers, &sparity->work);
2803 2804 2805 2806 2807
}

static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
2808
	struct btrfs_fs_info *fs_info = sctx->fs_info;
2809 2810
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
2811
	struct btrfs_io_context *bioc = NULL;
2812 2813 2814
	u64 length;
	int ret;

2815 2816
	if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap,
			   &sparity->ebitmap, sparity->nsectors))
2817 2818
		goto out;

2819
	length = sparity->logic_end - sparity->logic_start;
2820 2821

	btrfs_bio_counter_inc_blocked(fs_info);
2822
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2823 2824 2825
			       &length, &bioc);
	if (ret || !bioc || !bioc->raid_map)
		goto bioc_out;
2826

2827
	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2828 2829 2830 2831
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

2832 2833
	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, length,
					      sparity->scrub_dev,
2834
					      &sparity->dbitmap,
2835 2836 2837 2838 2839 2840 2841 2842 2843 2844
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
2845
bioc_out:
2846
	btrfs_bio_counter_dec(fs_info);
2847
	btrfs_put_bioc(bioc);
2848
	bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap,
2849 2850 2851 2852 2853 2854 2855 2856 2857 2858
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @start_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
				     u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
	u64 len;
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
	       key.type == BTRFS_METADATA_ITEM_KEY);
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		len = fs_info->nodesize;
	else
		len = key.offset;

	if (key.objectid + len <= search_start)
		return -1;
	if (key.objectid >= search_start + search_len)
		return 1;
	return 0;
}

/*
 * Locate one extent item which covers any byte in range
 * [@search_start, @search_start + @search_length)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item. This is for data extent crossing stripe boundary.
 *
 * Return 0 if we found such extent item, and @path will point to the extent item.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if hit fatal error, and @path will be released.
 */
static int find_first_extent_item(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct btrfs_key key;
	int ret;

	/* Continue using the existing path */
	if (path->nodes[0])
		goto search_forward;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = search_start;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ASSERT(ret > 0);
	/*
	 * Here we intentionally pass 0 as @min_objectid, as there could be
	 * an extent item starting before @search_start.
	 */
	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret < 0)
		return ret;
	/*
	 * No matter whether we have found an extent item, the next loop will
	 * properly do every check on the key.
	 */
search_forward:
	while (true) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid >= search_start + search_len)
			break;
		if (key.type != BTRFS_METADATA_ITEM_KEY &&
		    key.type != BTRFS_EXTENT_ITEM_KEY)
			goto next;

		ret = compare_extent_item_range(path, search_start, search_len);
		if (ret == 0)
			return ret;
		if (ret > 0)
			break;
next:
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret) {
				/* Either no more item or fatal error */
				btrfs_release_path(path);
				return ret;
			}
		}
	}
	btrfs_release_path(path);
	return 1;
}

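/*
 * Pull the start, size, flags and generation out of the extent item that
 * @path currently points at.  METADATA_ITEM keys store no size, so the
 * nodesize is used for them.
 */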
static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
	       key.type == BTRFS_EXTENT_ITEM_KEY);
	*extent_start_ret = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		*size_ret = path->nodes[0]->fs_info->nodesize;
	else
		*size_ret = key.offset;
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
}

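/*
 * Return true if [extent_start, extent_start + extent_len) crosses either
 * edge of [boundary_start, boundary_start + boundary_len).
 */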
static bool does_range_cross_boundary(u64 extent_start, u64 extent_len,
				      u64 boundary_start, u64 boundary_len)
{
	return (extent_start < boundary_start &&
		extent_start + extent_len > boundary_start) ||
	       (extent_start < boundary_start + boundary_len &&
		extent_start + extent_len > boundary_start + boundary_len);
}

static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
					       struct scrub_parity *sparity,
					       struct map_lookup *map,
					       struct btrfs_device *sdev,
					       struct btrfs_path *path,
					       u64 logical)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, logical);
3015
	u64 cur_logical = logical;
3016 3017 3018 3019 3020 3021 3022
	int ret;

	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);

	/* Path must not be populated */
	ASSERT(!path->nodes[0]);

3023
	while (cur_logical < logical + map->stripe_len) {
3024 3025 3026 3027 3028 3029 3030 3031 3032 3033
		struct btrfs_io_context *bioc = NULL;
		struct btrfs_device *extent_dev;
		u64 extent_start;
		u64 extent_size;
		u64 mapped_length;
		u64 extent_flags;
		u64 extent_gen;
		u64 extent_physical;
		u64 extent_mirror_num;

3034 3035 3036 3037 3038
		ret = find_first_extent_item(extent_root, path, cur_logical,
					     logical + map->stripe_len - cur_logical);
		/* No more extent item in this data stripe */
		if (ret > 0) {
			ret = 0;
3039 3040
			break;
		}
3041
		if (ret < 0)
3042
			break;
3043 3044
		get_extent_info(path, &extent_start, &extent_size, &extent_flags,
				&extent_gen);
3045

3046
		/* Metadata should not cross stripe boundaries */
3047
		if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3048 3049
		    does_range_cross_boundary(extent_start, extent_size,
					      logical, map->stripe_len)) {
3050
			btrfs_err(fs_info,
3051 3052
	"scrub: tree block %llu spanning stripes, ignored. logical=%llu",
				  extent_start, logical);
3053 3054 3055
			spin_lock(&sctx->stat_lock);
			sctx->stat.uncorrectable_errors++;
			spin_unlock(&sctx->stat_lock);
3056 3057
			cur_logical += extent_size;
			continue;
3058 3059
		}

3060 3061
		/* Skip hole range which doesn't have any extent */
		cur_logical = max(extent_start, cur_logical);
3062

3063 3064 3065 3066 3067
		/* Truncate the range inside this data stripe */
		extent_size = min(extent_start + extent_size,
				  logical + map->stripe_len) - cur_logical;
		extent_start = cur_logical;
		ASSERT(extent_size <= U32_MAX);

		scrub_parity_mark_sectors_data(sparity, extent_start, extent_size);

		mapped_length = extent_size;
		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_start,
				      &mapped_length, &bioc, 0);
		if (!ret && (!bioc || mapped_length < extent_size))
			ret = -EIO;
		if (ret) {
			btrfs_put_bioc(bioc);
			scrub_parity_mark_sectors_error(sparity, extent_start,
							extent_size);
			break;
		}
		extent_physical = bioc->stripes[0].physical;
		extent_mirror_num = bioc->mirror_num;
		extent_dev = bioc->stripes[0].dev;
		btrfs_put_bioc(bioc);

		ret = btrfs_lookup_csums_range(csum_root, extent_start,
					       extent_start + extent_size - 1,
					       &sctx->csum_list, 1);
		if (ret) {
			scrub_parity_mark_sectors_error(sparity, extent_start,
							extent_size);
			break;
		}

		ret = scrub_extent_for_parity(sparity, extent_start,
					      extent_size, extent_physical,
					      extent_dev, extent_flags,
					      extent_gen, extent_mirror_num);
		scrub_free_csums(sctx);

		if (ret) {
			scrub_parity_mark_sectors_error(sparity, extent_start,
							extent_size);
			break;
		}

		cond_resched();
3109
		cur_logical += extent_size;
3110 3111 3112 3113 3114
	}
	btrfs_release_path(path);
	return ret;
}

3115 3116 3117 3118 3119 3120
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  u64 logic_start,
						  u64 logic_end)
{
3121
	struct btrfs_fs_info *fs_info = sctx->fs_info;
3122
	struct btrfs_path *path;
3123
	u64 cur_logical;
3124 3125 3126 3127
	int ret;
	struct scrub_parity *sparity;
	int nsectors;

3128 3129 3130 3131 3132 3133 3134 3135 3136 3137
	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	path->search_commit_root = 1;
	path->skip_locking = 1;

3138
	ASSERT(map->stripe_len <= U32_MAX);
3139
	nsectors = map->stripe_len >> fs_info->sectorsize_bits;
3140 3141
	ASSERT(nsectors <= BITS_PER_LONG);
	sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS);
3142 3143 3144 3145
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
3146
		btrfs_free_path(path);
3147 3148 3149
		return -ENOMEM;
	}

3150
	ASSERT(map->stripe_len <= U32_MAX);
3151 3152 3153 3154 3155 3156
	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
3157
	refcount_set(&sparity->refs, 1);
3158
	INIT_LIST_HEAD(&sparity->sectors_list);
3159 3160

	ret = 0;
3161 3162 3163 3164
	for (cur_logical = logic_start; cur_logical < logic_end;
	     cur_logical += map->stripe_len) {
		ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map,
							  sdev, path, cur_logical);
3165 3166
		if (ret < 0)
			break;
3167
	}
3168

3169 3170
	scrub_parity_put(sparity);
	scrub_submit(sctx);
3171
	mutex_lock(&sctx->wr_lock);
3172
	scrub_wr_submit(sctx);
3173
	mutex_unlock(&sctx->wr_lock);
3174

3175
	btrfs_free_path(path);
3176 3177 3178
	return ret < 0 ? ret : 0;
}

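/*
 * On zoned filesystems the dev-replace target must be written in order, so
 * push out all queued read and write bios and wait for them to complete
 * before scrubbing the next extent.
 */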
static void sync_replace_for_zoned(struct scrub_ctx *sctx)
{
	if (!btrfs_is_zoned(sctx->fs_info))
		return;

	sctx->flush_all_writes = true;
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
}

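/*
 * After finishing a device extent, make sure the zone write pointer on the
 * dev-replace target matches the end of what was copied; report an error
 * if the write pointer cannot be brought in sync.
 */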
static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
					u64 physical, u64 physical_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int ret = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	mutex_lock(&sctx->wr_lock);
	if (sctx->write_pointer < physical_end) {
		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
						    physical,
						    sctx->write_pointer);
		if (ret)
			btrfs_err(fs_info,
				  "zoned: failed to recover write pointer");
	}
	mutex_unlock(&sctx->wr_lock);
	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);

	return ret;
}

/*
 * Scrub one range which can only has simple mirror based profile.
 * (Including all range in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
 *  RAID0/RAID10).
 *
 * Since we may need to handle a subset of block group, we need @logical_start
 * and @logical_length parameter.
 */
static int scrub_simple_mirror(struct scrub_ctx *sctx,
			       struct btrfs_root *extent_root,
			       struct btrfs_root *csum_root,
			       struct btrfs_block_group *bg,
			       struct map_lookup *map,
			       u64 logical_start, u64 logical_length,
			       struct btrfs_device *device,
			       u64 physical, int mirror_num)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	const u64 logical_end = logical_start + logical_length;
	/* An artificial limit, inherit from old scrub behavior */
	const u32 max_length = SZ_64K;
	struct btrfs_path path = { 0 };
	u64 cur_logical = logical_start;
	int ret;

	/* The range must be inside the bg */
	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);

	path.search_commit_root = 1;
	path.skip_locking = 1;
	/* Go through each extent items inside the logical range */
	while (cur_logical < logical_end) {
		u64 extent_start;
		u64 extent_len;
		u64 extent_flags;
		u64 extent_gen;
		u64 scrub_len;

		/* Canceled? */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			break;
		}
		/* Paused? */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* Push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}
		/* Block group removed? */
		spin_lock(&bg->lock);
		if (bg->removed) {
			spin_unlock(&bg->lock);
			ret = 0;
			break;
		}
		spin_unlock(&bg->lock);

		ret = find_first_extent_item(extent_root, &path, cur_logical,
					     logical_end - cur_logical);
		if (ret > 0) {
			/* No more extent, just update the accounting */
			sctx->stat.last_physical = physical + logical_length;
			ret = 0;
			break;
		}
		if (ret < 0)
			break;
		get_extent_info(&path, &extent_start, &extent_len,
				&extent_flags, &extent_gen);
		/* Skip hole range which doesn't have any extent */
		cur_logical = max(extent_start, cur_logical);

		/*
		 * Scrub len has three limits:
		 * - Extent size limit
		 * - Scrub range limit
		 *   This is especially important for RAID0/RAID10 to reuse
		 *   this function
		 * - Max scrub size limit
		 */
		scrub_len = min(min(extent_start + extent_len,
				    logical_end), cur_logical + max_length) -
			    cur_logical;

		if (extent_flags & BTRFS_EXTENT_FLAG_DATA) {
			ret = btrfs_lookup_csums_range(csum_root, cur_logical,
					cur_logical + scrub_len - 1,
					&sctx->csum_list, 1);
			if (ret)
				break;
		}
		if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
		    does_range_cross_boundary(extent_start, extent_len,
					      logical_start, logical_length)) {
			btrfs_err(fs_info,
"scrub: tree block %llu spanning boundaries, ignored. boundary=[%llu, %llu)",
				  extent_start, logical_start, logical_end);
			spin_lock(&sctx->stat_lock);
			sctx->stat.uncorrectable_errors++;
			spin_unlock(&sctx->stat_lock);
			cur_logical += scrub_len;
			continue;
		}
		ret = scrub_extent(sctx, map, cur_logical, scrub_len,
				   cur_logical - logical_start + physical,
				   device, extent_flags, extent_gen,
				   mirror_num);
		scrub_free_csums(sctx);
		if (ret)
			break;
		if (sctx->is_dev_replace)
			sync_replace_for_zoned(sctx);
		cur_logical += scrub_len;
		/* Don't hold CPU for too long time */
		cond_resched();
	}
	btrfs_release_path(&path);
	return ret;
}

/* Calculate the full stripe length for simple stripe based profiles */
static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
{
	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			    BTRFS_BLOCK_GROUP_RAID10));

	return map->num_stripes / map->sub_stripes * map->stripe_len;
}

/* Get the logical bytenr for the stripe */
static u64 simple_stripe_get_logical(struct map_lookup *map,
				     struct btrfs_block_group *bg,
				     int stripe_index)
{
	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			    BTRFS_BLOCK_GROUP_RAID10));
	ASSERT(stripe_index < map->num_stripes);

	/*
	 * (stripe_index / sub_stripes) gives how many data stripes we need to
	 * skip.
	 */
	return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start;
}

/* Get the mirror number for the stripe */
static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
{
	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			    BTRFS_BLOCK_GROUP_RAID10));
	ASSERT(stripe_index < map->num_stripes);

	/* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... */
	return stripe_index % map->sub_stripes + 1;
}

static int scrub_simple_stripe(struct scrub_ctx *sctx,
			       struct btrfs_root *extent_root,
			       struct btrfs_root *csum_root,
			       struct btrfs_block_group *bg,
			       struct map_lookup *map,
			       struct btrfs_device *device,
			       int stripe_index)
{
	const u64 logical_increment = simple_stripe_full_stripe_len(map);
	const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
	const u64 orig_physical = map->stripes[stripe_index].physical;
	const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
	u64 cur_logical = orig_logical;
	u64 cur_physical = orig_physical;
	int ret = 0;

	while (cur_logical < bg->start + bg->length) {
		/*
		 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
		 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
		 * this stripe.
		 */
		ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map,
					  cur_logical, map->stripe_len, device,
					  cur_physical, mirror_num);
		if (ret)
			return ret;
		/* Skip to next stripe which belongs to the target device */
		cur_logical += logical_increment;
		/* For physical offset, we just go to next stripe */
		cur_physical += map->stripe_len;
	}
	return ret;
}

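/*
 * Scrub one device extent (one stripe) of a chunk. Simple mirrored profiles
 * and RAID0/RAID10 are dispatched to the dedicated helpers above; only
 * RAID56 still uses the rotation-aware loop below, which iterates the
 * stripes by physical offset.
 */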
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct btrfs_block_group *bg,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int stripe_index, u64 dev_extent_len)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *csum_root;
	struct blk_plug plug;
	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
	const u64 chunk_logical = bg->start;
	int ret;
	u64 physical = map->stripes[stripe_index].physical;
	const u64 physical_end = physical + dev_extent_len;
	u64 logical;
	u64 logic_end;
	/* The logical increment after finishing one stripe */
	u64 increment;
	/* Offset inside the chunk */
	u64 offset;
	u64 stripe_logical;
	u64 stripe_end;
	int stop_loop = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Work on the commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite them
	 * to repair disk errors without any race conditions.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->reada = READA_FORWARD;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	root = btrfs_extent_root(fs_info, bg->start);
	csum_root = btrfs_csum_root(fs_info, bg->start);

	/*
	 * Collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB.
	 */
	blk_start_plug(&plug);

	if (sctx->is_dev_replace &&
	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
		mutex_lock(&sctx->wr_lock);
		sctx->write_pointer = physical;
		mutex_unlock(&sctx->wr_lock);
		sctx->flush_all_writes = true;
	}

	/*
	 * There used to be a big double loop to handle all profiles using the
	 * same routine, which grows larger and more gross over time.
	 *
	 * So here we handle each profile differently, so that simpler profiles
	 * have simpler scrubbing functions.
	 */
	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * The above check rules out all complex profiles; the
		 * remaining ones are SINGLE|DUP|RAID1|RAID1C*, which are
		 * simple mirrored duplication without striping.
		 *
		 * Only @physical and @mirror_num need to be calculated using
		 * @stripe_index.
		 */
		ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
				bg->start, bg->length, scrub_dev,
				map->stripes[stripe_index].physical,
				stripe_index + 1);
		offset = 0;
		goto out;
	}
	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
		ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
					  scrub_dev, stripe_index);
		offset = map->stripe_len * (stripe_index / map->sub_stripes);
		goto out;
	}

	/* Only RAID56 goes through the old code */
	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
	ret = 0;

	/* Calculate the logical end of the stripe */
	get_raid56_logic_offset(physical_end, stripe_index,
				map, &logic_end, NULL);
	logic_end += chunk_logical;

	/* Initialize @offset in case we need to go to out: label */
	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
	increment = map->stripe_len * nr_data_stripes(map);

	/*
	 * Due to the rotation, for RAID56 it's better to iterate each stripe
	 * using their physical offset.
	 */
	while (physical < physical_end) {
		ret = get_raid56_logic_offset(physical, stripe_index, map,
					      &logical, &stripe_logical);
		logical += chunk_logical;
		if (ret) {
			/* It is a parity stripe */
			stripe_logical += chunk_logical;
			stripe_end = stripe_logical + increment;
			ret = scrub_raid56_parity(sctx, map, scrub_dev,
						  stripe_logical,
						  stripe_end);
			if (ret)
				goto out;
			goto next;
		}

		/*
		 * Now we're at a data stripe, scrub each extent in the range.
		 *
		 * At this stage, if we ignore the repair part, inside each data
		 * stripe it is no different from the SINGLE profile.
		 * We can reuse scrub_simple_mirror() here, as the repair part
		 * is still based on @mirror_num.
		 */
		ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
					  logical, map->stripe_len,
					  scrub_dev, physical, 1);
		if (ret < 0)
			goto out;
next:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[stripe_index].physical +
						   dev_extent_len;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);

	if (sctx->is_dev_replace && ret >= 0) {
		int ret2;

		ret2 = sync_write_pointer_for_zoned(sctx,
				chunk_logical + offset,
				map->stripes[stripe_index].physical,
				physical_end);
		if (ret2)
			ret = ret2;
	}

	return ret < 0 ? ret : 0;
}

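/*
 * Scrub every stripe of the chunk backing @bg that lives on @scrub_dev
 * at @dev_offset, using the cached extent mapping of the block group.
 */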
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_block_group *bg,
					  struct btrfs_device *scrub_dev,
					  u64 dev_offset,
					  u64 dev_extent_len)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, bg->start, bg->length);
	read_unlock(&map_tree->lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&bg->lock);
		if (!bg->removed)
			ret = -EINVAL;
		spin_unlock(&bg->lock);

		return ret;
	}
	if (em->start != bg->start)
		goto out;
	if (em->len < dev_extent_len)
		goto out;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, bg, map, scrub_dev, i,
					   dev_extent_len);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

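/*
 * For zoned filesystems, push all in-flight extent writes of the block
 * group to disk before dev-replace copies it: wait for reservations,
 * nocow writers and ordered extents, then commit the transaction.
 */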
static int finish_extent_writes_for_zoned(struct btrfs_root *root,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_wait_block_group_reservations(cache);
	btrfs_wait_nocow_writers(cache);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}

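/*
 * Walk all dev extents of @scrub_dev within [start, end) through the
 * commit root and scrub the block group each of them belongs to.
 */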
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		u64 dev_extent_len;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + dev_extent_len <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		ASSERT(cache->start <= chunk_offset);
		/*
		 * We are using the commit root to search for device extents, so
		 * that means we could have found a device extent item from a
		 * block group that was deleted in the current transaction. The
		 * logical start offset of the deleted block group, stored at
		 * @chunk_offset, might be part of the logical address range of
		 * a new block group (which uses different physical extents).
		 * In this case btrfs_lookup_block_group() has returned the new
		 * block group, and its start address is less than @chunk_offset.
		 *
		 * We skip such new block groups, because it's pointless to
		 * process them, as we won't find their extents because we search
		 * for them using the commit root of the extent tree. For a device
		 * replace it's also fine to skip it, we won't miss copying them
		 * to the target device because we have the write duplication
		 * setup through the regular write path (by btrfs_map_block()),
		 * and we have committed a transaction when we started the device
		 * replace, right after setting up the device replace state.
		 */
		if (cache->start < chunk_offset) {
			btrfs_put_block_group(cache);
			goto skip;
		}

		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
			spin_lock(&cache->lock);
			if (!cache->to_copy) {
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto skip;
			}
			spin_unlock(&cache->lock);
		}

		/*
		 * Make sure that while we are scrubbing the corresponding block
		 * group doesn't get its logical address and its device extents
		 * reused for another block group, which can possibly be of a
		 * different type and different profile. We do this to prevent
		 * false error detections and crashes due to bogus attempts to
		 * repair extents.
		 */
		spin_lock(&cache->lock);
		if (cache->removed) {
			spin_unlock(&cache->lock);
			btrfs_put_block_group(cache);
			goto skip;
		}
		btrfs_freeze_block_group(cache);
		spin_unlock(&cache->lock);

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bg is small, that's pretty common.
		 * 2. A new SYSTEM bg will be allocated,
		 *    since the regular code path would allocate a new chunk.
		 * 3. The new SYSTEM bg is empty and will get cleaned up.
		 *    Before cleanup really happens, it's marked RO again.
		 * 4. The empty SYSTEM bg gets scrubbed.
		 *    We go back to 2.
		 *
		 * This can easily boost the amount of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up
		 * all the space of btrfs_super_block::sys_chunk_array.
		 *
		 * While for dev replace, we need to try our best to mark the
		 * block group RO, to prevent the race between:
		 * - Write duplication
		 *   Contains the latest data
		 * - Scrub copy
		 *   Contains data from the commit tree
		 *
		 * If the target block group is not marked RO, nocow writes can
		 * be overwritten by scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
		if (!ret && sctx->is_dev_replace) {
			ret = finish_extent_writes_for_zoned(root, cache);
			if (ret) {
				btrfs_dec_block_group_ro(cache);
				scrub_pause_off(fs_info);
				btrfs_put_block_group(cache);
				break;
			}
		}

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * failed to create a new chunk for metadata.
			 * This is not a problem for scrub, because metadata
			 * is always COWed, and our scrub pauses transaction
			 * commits.
			 */
			ro_set = 0;
		} else if (ret == -ETXTBSY) {
			btrfs_warn(fs_info,
		   "skipping scrub of block group %llu due to active swapfile",
				   cache->start);
			scrub_pause_off(fs_info);
			ret = 0;
			goto skip_unfreeze;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block group is marked RO, wait for nocow
		 * writes to finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in the commit
		 * tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
					cache->length);
		}

		scrub_pause_off(fs_info);
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + dev_extent_len;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
				  dev_extent_len);

		/*
		 * Flush and submit all pending read and write bios, then wait
		 * for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * Must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		if (sctx->is_dev_replace &&
		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
						      cache, found_key.offset))
			ro_set = 0;

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}
skip_unfreeze:
		btrfs_unfreeze_block_group(cache);
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + dev_extent_len;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

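/*
 * Scrub all superblock copies of @scrub_dev that fit within the committed
 * device size, then wait for the submitted bios to complete.
 */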
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;
		if (!btrfs_check_super_location(scrub_dev, bytenr))
			continue;

		ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				    scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				    NULL, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

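/*
 * Drop one reference on the scrub workqueues and destroy them once the
 * last reference is gone. The fs_info pointers are cleared under
 * scrub_lock so a racing scrub_workers_get() can safely recreate them.
 */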
static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
					&fs_info->scrub_lock)) {
		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
		struct workqueue_struct *scrub_wr_comp =
						fs_info->scrub_wr_completion_workers;
		struct workqueue_struct *scrub_parity =
						fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
		mutex_unlock(&fs_info->scrub_lock);

		if (scrub_workers)
			destroy_workqueue(scrub_workers);
		if (scrub_wr_comp)
			destroy_workqueue(scrub_wr_comp);
		if (scrub_parity)
			destroy_workqueue(scrub_parity);
	}
}

/*
 * Get a reference count on fs_info->scrub_workers. Start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	struct workqueue_struct *scrub_workers = NULL;
	struct workqueue_struct *scrub_wr_comp = NULL;
	struct workqueue_struct *scrub_parity = NULL;
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;
	int ret = -ENOMEM;

	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
		return 0;

	scrub_workers = alloc_workqueue("btrfs-scrub", flags,
					is_dev_replace ? 1 : max_active);
	if (!scrub_workers)
		goto fail_scrub_workers;

	scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
	if (!scrub_wr_comp)
		goto fail_scrub_wr_completion_workers;

	scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active);
	if (!scrub_parity)
		goto fail_scrub_parity_workers;

	mutex_lock(&fs_info->scrub_lock);
	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL &&
		       fs_info->scrub_wr_completion_workers == NULL &&
		       fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_workers = scrub_workers;
		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
		fs_info->scrub_parity_workers = scrub_parity;
		refcount_set(&fs_info->scrub_workers_refcnt, 1);
		mutex_unlock(&fs_info->scrub_lock);
		return 0;
	}
	/* Other thread raced in and created the workers for us */
	refcount_inc(&fs_info->scrub_workers_refcnt);
	mutex_unlock(&fs_info->scrub_lock);

	ret = 0;
	destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
	destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
	destroy_workqueue(scrub_workers);
fail_scrub_workers:
	return ret;
}

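/*
 * Entry point of scrub and the scrub side of dev-replace: validate the
 * parameters, set up the scrub context and worker threads, then scrub the
 * super blocks and all chunks of @devid within [start, end).
 */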
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub cannot calculate the checksum, given the
		 * way scrub is implemented. Do not handle this situation at
		 * all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
4125 4126
		       fs_info->nodesize,
		       BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_SECTORS_PER_BLOCK) {
		/*
		 * Would exhaust the array bounds of the sectors member in
		 * struct scrub_block.
		 */
		btrfs_err(fs_info,
"scrub: nodesize and sectorsize <= SCRUB_MAX_SECTORS_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->nodesize, SCRUB_MAX_SECTORS_PER_BLOCK,
		       fs_info->sectorsize, SCRUB_MAX_SECTORS_PER_BLOCK);
		return -EINVAL;
	}

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		goto out_free_ctx;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info,
			"scrub on devid %llu: filesystem on %s is not writable",
				 devid, rcu_str_deref(dev->name));
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out;
	}
	up_read(&fs_info->dev_replace.rwsem);

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid
	 * racing between committing the transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at scrub_sectors() and scrub_sectors_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * By holding the device list mutex, we can
		 * kick off writing the super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_workers_put(fs_info);
	scrub_put_ctx(sctx);

	return ret;
out:
	scrub_workers_put(fs_info);
out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}

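/*
 * Request all running scrubs to pause and wait until every one of them
 * has actually paused. Used around transaction commit.
 */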
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

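/*
 * Cancel all running scrubs on this filesystem and wait for them to
 * finish. Returns -ENOTCONN if no scrub was running.
 */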
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

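/*
 * Cancel a running scrub on @dev and wait until its scrub context is
 * gone. Returns -ENOTCONN if no scrub was running on the device.
 */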
int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

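/*
 * Copy the statistics of a scrub running on @devid into @progress.
 * Returns -ENODEV if the device cannot be found and -ENOTCONN if no
 * scrub context is attached to it.
 */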
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

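/*
 * Map @extent_logical to a concrete copy (the first stripe returned by
 * btrfs_map_block() for a read) and hand back its physical offset, device
 * and mirror number, so the caller can read a good copy of the extent.
 */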
static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
				 u64 extent_logical, u32 extent_len,
				 u64 *extent_physical,
				 struct btrfs_device **extent_dev,
				 int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_io_context *bioc = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bioc, 0);
	if (ret || !bioc || mapped_length < extent_len ||
	    !bioc->stripes[0].dev->bdev) {
		btrfs_put_bioc(bioc);
		return;
	}

	*extent_physical = bioc->stripes[0].physical;
	*extent_mirror_num = bioc->mirror_num;
	*extent_dev = bioc->stripes[0].dev;
	btrfs_put_bioc(bioc);
}