// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following two values only influence the performance.
 *
 * The second one configures the number of parallel and outstanding I/O
 * operations. The first one configures an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_SECTORS_PER_BIO	32	/* 128KiB per bio for 4KiB pages */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for 4KiB pages */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
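/* With the current 64KiB BTRFS_MAX_METADATA_BLOCKSIZE this evaluates to 16. */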

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_io_context	*bioc;
	u64			map_length;
};

struct scrub_sector {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	u8			mirror_num;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
	struct scrub_sector	*sectors[SCRUB_SECTORS_PER_BIO];
	int			sector_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_sector	*sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
	int			sector_count;
	atomic_t		outstanding_sectors;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u32			stripe_len;

	refcount_t		refs;

	struct list_head	sectors_list;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[];
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			sectors_per_bio;

	/* State of IO submission throttling affecting the associated device */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	int			is_dev_replace;
	u64			write_pointer;

	struct scrub_bio        *wr_curr_bio;
	struct mutex            wr_lock;
	struct btrfs_device     *wr_tgtdev;
	bool                    flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t              refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
					     int sector_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_sector_get(struct scrub_sector *sector);
static void scrub_sector_put(struct scrub_sector *sector);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
			 u64 physical, struct btrfs_device *dev, u64 flags,
			 u64 gen, int mirror_num, u8 *csum,
			 u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u32 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
{
	return sector->recover &&
	       (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
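
/*
 * Note on scrub_blocked_if_needed() above: it briefly flags this scrub as
 * paused so a pending pause request (e.g. a transaction commit waiting for
 * scrubs to stop) can make progress, then blocks in scrub_pause_off() until
 * the request is withdrawn.
 */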

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/*
	 * Insert new lock.
	 */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
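/*
 * Example (illustrative numbers only): with cache->start == 1MiB and
 * full_stripe_len == 128KiB, a bytenr of 1MiB + 300KiB lies in the third
 * full stripe, so this returns 1MiB + 2 * 128KiB == 1MiB + 256KiB.
 */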
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6); for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * So the caller must call unlock_full_stripe() in the same context.
 *
 * Return <0 if it encounters an error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock the full stripe without problem.
 * Return <0 for error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
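
/*
 * Typical usage of the pair above (sketch, mirroring what
 * scrub_handle_errored_block() does):
 *
 *	bool locked = false;
 *	int ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck/repair the sectors covered by the full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 */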

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->sector_count; i++) {
			WARN_ON(!sbio->sectors[i]->page);
			scrub_block_put(sbio->sectors[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->sector_count = 0;
		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
				NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	sctx->throttle_deadline = 0;

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->sector_count < 1);
	dev = sblock->sectors[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->sectors[0]->physical;
	swarn.logical = sblock->sectors[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bioc(recover->bioc);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all sectors in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int sector_num;
	int success;
	bool full_stripe_locked;
	unsigned int nofs_flag;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->sector_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->sectors[0]->logical;
	BUG_ON(sblock_to_check->sectors[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->sectors[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->sectors[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->sectors[0]->have_csum;
	dev = sblock_to_check->sectors[0]->dev;

	if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
		return 0;

	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait for
	 * all the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of
	 * this function.
	 */
	nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, races can happen between scrub threads of different
	 * devices. For data corruption, the Parity and Data threads will both
	 * try to recover the data.
	 * The race can lead to doubly added csum errors, or even an
	 * unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		memalloc_nofs_restore(nofs_flag);
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * sector by sector this time in order to know which sectors
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * sectors from those mirrors without I/O error on the
	 * particular sectors. One example (with blocks >= 2 * sectorsize)
	 * would be that mirror #1 has an I/O error on the first sector,
	 * the second sector is good, and mirror #2 has an I/O error on
	 * the second sector, but the first sector is good.
	 * Then the first sector of the first mirror can be repaired by
	 * taking the first sector of the second mirror, and the
	 * second sector of the second mirror can be repaired by
	 * copying the contents of the 2nd sector of the 1st mirror.
	 * One more note: if the sectors of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the sectors are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* Setup the context, map the logical blocks and alloc the sectors */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading sector by sector, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those sectors are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other sectors is better (and it
	 * could happen otherwise that a correct sector would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ; mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].sector_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->sectors[0]->recover;
			int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].sector_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->sectors[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those sectors.
	 * Select the good sectors from mirrors to rewrite bad sectors from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of sectors from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd sector of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on sectorsize. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same sectorsize
	 * area are unreadable.
	 */
	success = 1;
	for (sector_num = 0; sector_num < sblock_bad->sector_count;
	     sector_num++) {
		struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
		struct scrub_block *sblock_other = NULL;

		/* Skip no-io-error sectors in scrub */
		if (!sector_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (sector_bad->io_error) {
			/* Try to find no-io-error sector in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].sector_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    sectors[sector_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the sector from.
			 * scrub_write_sector_to_dev_replace() handles this
			 * case (sector->io_error) by filling the block with
			 * zeros before submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_sector_to_dev_replace(sblock_other,
							      sector_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_sector_from_good_copy(sblock_bad,
								 sblock_other,
								 sector_num, 0);
			if (ret == 0)
				sector_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int i;

			for (i = 0; i < sblock->sector_count; i++) {
				sblock->sectors[i]->sblock = NULL;
				recover = sblock->sectors[i]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->sectors[i]->recover = NULL;
				}
				scrub_sector_put(sblock->sectors[i]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		return ret;
	return 0;
}

static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
{
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bioc->num_stripes;
}
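
/*
 * Note on the counts above: they reflect the number of ways a sector can be
 * read. RAID5 data can be returned as-is or rebuilt from parity (2), RAID6
 * additionally from the Q stripe (3); for the remaining profiles every
 * stripe in the io_context is a real mirror.
 */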

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
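
/*
 * Note on the lookup above: for RAID5/6, bioc->raid_map[] records the logical
 * address covered by each stripe, with RAID5_P_STRIPE/RAID6_Q_STRIPE as
 * sentinels for the parity stripes, so the data stripe covering @logical is
 * found by a linear scan. For all other profiles, stripe i simply corresponds
 * to mirror i.
 */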

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
	u64 logical = original_sblock->sectors[0]->logical;
	u64 generation = original_sblock->sectors[0]->generation;
	u64 flags = original_sblock->sectors[0]->flags;
	u64 have_csum = original_sblock->sectors[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_io_context *bioc;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int sector_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * Note: the two members refs and outstanding_sectors are not used (and
	 * not set) in the blocks that are used for the recheck procedure.
	 */

	while (length > 0) {
		sublen = min_t(u64, length, fs_info->sectorsize);
		mapped_length = sublen;
		bioc = NULL;

		/*
		 * With a length of sectorsize, each returned stripe represents
		 * one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				       logical, &mapped_length, &bioc);
		if (ret || !bioc || mapped_length < sublen) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bioc = bioc;
		recover->map_length = mapped_length;

		ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_sector *sector;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			sector = kzalloc(sizeof(*sector), GFP_NOFS);
			if (!sector) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_sector_get(sector);
			sblock->sectors[sector_index] = sector;
			sector->sblock = sblock;
			sector->flags = flags;
			sector->generation = generation;
			sector->logical = logical;
			sector->have_csum = have_csum;
			if (have_csum)
				memcpy(sector->csum,
				       original_sblock->sectors[0]->csum,
				       sctx->fs_info->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bioc->map_type,
						      bioc->raid_map,
						      mapped_length,
						      bioc->num_stripes -
						      bioc->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			sector->physical = bioc->stripes[stripe_index].physical +
					 stripe_offset;
			sector->dev = bioc->stripes[stripe_index].dev;

			BUG_ON(sector_index >= original_sblock->sector_count);
			sector->physical_for_dev_replace =
				original_sblock->sectors[sector_index]->
				physical_for_dev_replace;
			/* For missing devices, dev->bdev is NULL */
			sector->mirror_num = mirror_index + 1;
			sblock->sector_count++;
			sector->page = alloc_page(GFP_NOFS);
			if (!sector->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			sector->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		sector_index++;
	}

	return 0;
}

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_sector *sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	int mirror_num;

	bio->bi_iter.bi_sector = sector->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	mirror_num = sector->sblock->sectors[0]->mirror_num;
	ret = raid56_parity_recover(bio, sector->recover->bioc,
				    sector->recover->map_length,
				    mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}
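
/*
 * Note: scrub_submit_raid56_bio_wait() above turns the asynchronous raid56
 * recovery into a synchronous operation. It parks on an on-stack completion
 * fired by scrub_bio_wait_endio(), then translates the bio status into a
 * regular errno.
 */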

static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_sector *first_sector = sblock->sectors[0];
	struct bio *bio;
	int i;

	/* All sectors in sblock belong to the same stripe on the same device. */
	ASSERT(first_sector->dev);
	if (!first_sector->dev->bdev)
		goto out;

	bio = bio_alloc(first_sector->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];

		WARN_ON(!sector->page);
		bio_add_page(bio, sector->page, PAGE_SIZE, 0);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (i = 0; i < sblock->sector_count; i++)
		sblock->sectors[i]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * This function will check the on disk data for checksum errors, header errors
 * and read I/O errors. If any I/O errors happen, the exact sectors which are
 * errored are marked as being bad. The goal is to enable scrub to take those
 * sectors that are not errored from all the mirrors so that the sectors that
 * are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int i;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];
		struct bio bio;
		struct bio_vec bvec;

		if (sector->dev->bdev == NULL) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!sector->page);
		bio_init(&bio, sector->dev->bdev, &bvec, 1, REQ_OP_READ);
		bio_add_page(&bio, sector->page, fs_info->sectorsize, 0);
		bio.bi_iter.bi_sector = sector->physical >> 9;

		btrfsic_check_bio(&bio);
		if (submit_bio_wait(&bio)) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_uninit(&bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}
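
/*
 * Note: when the failed mirror is not being retried and the block lives on
 * RAID5/6, the whole block is read in one submission via the raid56 path
 * above; otherwise each sector is read with its own synchronous bio so that
 * I/O errors can be attributed to individual sectors.
 */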

static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
{
	struct btrfs_fs_devices *fs_devices = sector->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int i;
	int ret = 0;

	for (i = 0; i < sblock_bad->sector_count; i++) {
		int ret_sub;

		ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
							     sblock_good, i, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write)
{
	struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
	struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
	const u32 sectorsize = fs_info->sectorsize;

	BUG_ON(sector_bad->page == NULL);
	BUG_ON(sector_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || sector_bad->io_error) {
		struct bio bio;
		struct bio_vec bvec;
		int ret;

		if (!sector_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_sector_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio_init(&bio, sector_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
		bio.bi_iter.bi_sector = sector_bad->physical >> 9;
		__bio_add_page(&bio, sector_good->page, sectorsize, 0);

		btrfsic_check_bio(&bio);
		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);

		if (ret) {
			btrfs_dev_stat_inc_and_print(sector_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			return -EIO;
		}
	}

	return 0;
}
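
/*
 * Note on the repair above: with force_write set (as in
 * scrub_repair_block_from_good_copy()), the good sector is written out even
 * if no error was recorded for it; otherwise the write is limited to sectors
 * with header, checksum or I/O errors.
 */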

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int i;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (i = 0; i < sblock->sector_count; i++) {
		int ret;

		ret = scrub_write_sector_to_dev_replace(sblock, i);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}

static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
{
	struct scrub_sector *sector = sblock->sectors[sector_num];

	BUG_ON(sector->page == NULL);
	if (sector->io_error)
		clear_page(page_address(sector->page));

	return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
}

static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
	int ret = 0;
	u64 length;

	if (!btrfs_is_zoned(sctx->fs_info))
		return 0;

	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
		return 0;

	if (sctx->write_pointer < physical) {
		length = physical - sctx->write_pointer;

		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
						sctx->write_pointer, length);
		if (!ret)
			sctx->write_pointer = physical;
	}
	return ret;
}
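
/*
 * Example for the gap filling above (illustrative numbers): if the target
 * zone's write pointer sits at 16MiB and the next sector must land at
 * 16MiB + 64KiB, the 64KiB gap is zeroed first so that the write starts
 * exactly at the write pointer, as required on sequential zones.
 */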

static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_bio *sbio;
	int ret;
	const u32 sectorsize = sctx->fs_info->sectorsize;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->sector_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->sector_count == 0) {
		ret = fill_writer_pointer_gap(sctx, sector->physical_for_dev_replace);
		if (ret) {
			mutex_unlock(&sctx->wr_lock);
			return ret;
		}

		sbio->physical = sector->physical_for_dev_replace;
		sbio->logical = sector->logical;
		sbio->dev = sctx->wr_tgtdev;
		if (!sbio->bio) {
			sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
					      REQ_OP_WRITE, GFP_NOFS);
		}
		sbio->bio->bi_private = sbio;
		sbio->bio->bi_end_io = scrub_wr_bio_end_io;
		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->status = 0;
	} else if (sbio->physical + sbio->sector_count * sectorsize !=
		   sector->physical_for_dev_replace ||
		   sbio->logical + sbio->sector_count * sectorsize !=
		   sector->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
	if (ret != sectorsize) {
		if (sbio->sector_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->sectors[sbio->sector_count] = sector;
	scrub_sector_get(sector);
	sbio->sector_count++;
	if (sbio->sector_count == sctx->sectors_per_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}
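
/*
 * Note on the batching above: sectors are appended to the current write bio
 * only while they stay physically and logically contiguous; any
 * discontinuity, a full bio (sctx->sectors_per_bio), or a failed
 * bio_add_page() flushes the bio via scrub_wr_submit() and starts a new one.
 */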

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	scrub_pending_bio_inc(sctx);
	/*
	 * Process all writes in a single worker thread. The block layer then
	 * orders the requests before sending them to the driver, which
	 * doubled the write performance on spinning disks when measured with
	 * Linux 3.5.
	 */
	btrfsic_check_bio(sbio->bio);
	submit_bio(sbio->bio);

	if (btrfs_is_zoned(sctx->fs_info))
		sctx->write_pointer = sbio->physical + sbio->sector_count *
			sctx->fs_info->sectorsize;
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
	if (sbio->status) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->sector_count; i++) {
			struct scrub_sector *sector = sbio->sectors[i];

			sector->io_error = 1;
			atomic64_inc(&dev_replace->num_write_errors);
		}
	}

	for (i = 0; i < sbio->sector_count; i++)
		scrub_sector_put(sbio->sectors[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

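/*
 * Verify the checksum of one scrub_block according to the extent flags of
 * its first sector (data, tree block or super block). Data and tree blocks
 * that fail verification are handed to scrub_handle_errored_block() for
 * repair; super block errors are only counted.
 */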
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently, because this function
	 * only uses the return value instead of the stat values.
	 *
	 * TODO: always use stats.
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->sector_count < 1);
	flags = sblock->sectors[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

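/*
 * Verify the data checksum of the first sector against the csum collected
 * from the csum tree. Sets and returns sblock->checksum_error.
 */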
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 csum[BTRFS_CSUM_SIZE];
	struct scrub_sector *sector;
	char *kaddr;

	BUG_ON(sblock->sector_count < 1);
	sector = sblock->sectors[0];
	if (!sector->have_csum)
		return 0;

	kaddr = page_address(sector->page);

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);

	/*
	 * In scrub_sectors() and scrub_sectors_for_parity() we ensure each sector
	 * only contains one sector of data.
	 */
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);

	if (memcmp(csum, sector->csum, fs_info->csum_size))
		sblock->checksum_error = 1;
	return sblock->checksum_error;
}

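/*
 * Verify a metadata block: check bytenr, generation, fsid and chunk tree
 * UUID in the header, then the checksum over all sectors of the node.
 * Returns non-zero if a header or checksum error was detected.
 */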
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	/*
	 * This is done in sectorsize steps even for metadata as there's a
	 * constraint for nodesize to be aligned to sectorsize. This will need
	 * to change so we don't misuse data and metadata units like that.
	 */
	const u32 sectorsize = sctx->fs_info->sectorsize;
	const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
	int i;
	struct scrub_sector *sector;
	char *kaddr;

	BUG_ON(sblock->sector_count < 1);

	/* Each member in sectors is just one sector */
	ASSERT(sblock->sector_count == num_sectors);

	sector = sblock->sectors[0];
	kaddr = page_address(sector->page);
	h = (struct btrfs_header *)kaddr;
	memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);

	/*
	 * We don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sector->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sector->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sector))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    sectorsize - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_sectors; i++) {
		kaddr = page_address(sblock->sectors[i]->page);
		crypto_shash_update(shash, kaddr, sectorsize);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

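/*
 * Verify a super block copy: bytenr, generation, fsid and checksum. Errors
 * are only counted here, since super blocks are rewritten by the next
 * transaction commit anyway.
 */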
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct scrub_sector *sector;
	char *kaddr;
	int fail_gen = 0;
	int fail_cor = 0;

	BUG_ON(sblock->sector_count < 1);
	sector = sblock->sectors[0];
	kaddr = page_address(sector->page);
	s = (struct btrfs_super_block *)kaddr;

	if (sector->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sector->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sector))
		++fail_cor;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
			BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);

	if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * If we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sector->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sector->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->sector_count; i++)
			scrub_sector_put(sblock->sectors[i]);
		kfree(sblock);
	}
}

static void scrub_sector_get(struct scrub_sector *sector)
{
	atomic_inc(&sector->refs);
}

static void scrub_sector_put(struct scrub_sector *sector)
{
	if (atomic_dec_and_test(&sector->refs)) {
		if (sector->page)
			__free_page(sector->page);
		kfree(sector);
	}
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second.  Limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
 */
static void scrub_throttle(struct scrub_ctx *sctx)
{
	const int time_slice = 1000;
	struct scrub_bio *sbio;
	struct btrfs_device *device;
	s64 delta;
	ktime_t now;
	u32 div;
	u64 bwlimit;

	sbio = sctx->bios[sctx->curr];
	device = sbio->dev;
	bwlimit = READ_ONCE(device->scrub_speed_max);
	if (bwlimit == 0)
		return;

	/*
	 * The slice is divided into intervals in which the IO is submitted;
	 * the interval count scales with bwlimit and is capped at 64.
	 */
	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
	div = min_t(u32, 64, div);

	/* Start new epoch, set deadline */
	now = ktime_get();
	if (sctx->throttle_deadline == 0) {
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	/* Still in the time to send? */
	if (ktime_before(now, sctx->throttle_deadline)) {
		/* If current bio is within the limit, send it */
		sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;

		/* We're over the limit, sleep until the rest of the slice */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		/* New request after deadline, start new epoch */
		delta = 0;
	}

	if (delta) {
		long timeout;

		timeout = div_u64(delta * HZ, 1000);
		schedule_timeout_interruptible(timeout);
	}

	/* Next call will start the deadline period */
	sctx->throttle_deadline = 0;
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	scrub_throttle(sctx);

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_check_bio(sbio->bio);
	submit_bio(sbio->bio);
}

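/*
 * Queue one sector for reading. Sectors that are contiguous on the same
 * device are batched into the current read bio; the bio is submitted when
 * it is full or when contiguity breaks.
 */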
static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_block *sblock = sector->sblock;
	struct scrub_bio *sbio;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int ret;

again:
	/*
	 * Grab a fresh bio or wait for one to become available.
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->sector_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->sector_count == 0) {
		sbio->physical = sector->physical;
		sbio->logical = sector->logical;
		sbio->dev = sector->dev;
		if (!sbio->bio) {
			sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
					      REQ_OP_READ, GFP_NOFS);
		}
		sbio->bio->bi_private = sbio;
		sbio->bio->bi_end_io = scrub_bio_end_io;
		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->status = 0;
	} else if (sbio->physical + sbio->sector_count * sectorsize !=
		   sector->physical ||
		   sbio->logical + sbio->sector_count * sectorsize !=
		   sector->logical ||
		   sbio->dev != sector->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->sectors[sbio->sector_count] = sector;
	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
	if (ret != sectorsize) {
		if (sbio->sector_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_sectors);
	sbio->sector_count++;
	if (sbio->sector_count == sctx->sectors_per_bio)
		scrub_submit(sctx);

	return 0;
}

static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_status)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->sectors[0]->logical;
	dev = sblock->sectors[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_block_put(sblock);
	scrub_pending_bio_dec(sctx);
}

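/*
 * Read a block that sits on a missing device by using the RAID5/6 recovery
 * machinery: allocate a "missing" rbio and let the raid56 layer rebuild the
 * data from the remaining stripes.
 */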
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->sector_count << fs_info->sectorsize_bits;
	u64 logical = sblock->sectors[0]->logical;
	struct btrfs_io_context *bioc = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &length, &bioc);
	if (ret || !bioc || !bioc->raid_map)
		goto bioc_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bioc_out;
	}

	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(bio, bioc, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];

		/*
		 * For now, our scrub is still one page per sector, so pgoff
		 * is always 0.
		 */
		raid56_add_scrub_pages(rbio, sector->page, 0, sector->logical);
	}

	btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bioc_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bioc(bioc);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

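/*
 * Split a range into sectors, allocate a scrub_block that holds one page
 * per sector and queue every sector for reading. Blocks on a missing
 * device are routed to the RAID5/6 recovery path instead.
 */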
static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/*
	 * One ref inside this function, plus one for each page added to
	 * a bio later on.
	 */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_sector *sector;
		/*
		 * Here we will allocate one page for one sector to scrub.
		 * This is fine if PAGE_SIZE == sectorsize, but will cost
		 * more memory for PAGE_SIZE > sectorsize case.
		 */
		u32 l = min(sectorsize, len);

		sector = kzalloc(sizeof(*sector), GFP_KERNEL);
		if (!sector) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
		scrub_sector_get(sector);
		sblock->sectors[index] = sector;
		sector->sblock = sblock;
		sector->dev = dev;
		sector->flags = flags;
		sector->generation = gen;
		sector->logical = logical;
		sector->physical = physical;
		sector->physical_for_dev_replace = physical_for_dev_replace;
		sector->mirror_num = mirror_num;
		if (csum) {
			sector->have_csum = 1;
			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
		} else {
			sector->have_csum = 0;
		}
		sblock->sector_count++;
		sector->page = alloc_page(GFP_KERNEL);
		if (!sector->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->sector_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->sector_count; index++) {
			struct scrub_sector *sector = sblock->sectors[index];
			int ret;

			ret = scrub_add_sector_to_rd_bio(sctx, sector);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (flags & BTRFS_EXTENT_FLAG_SUPER)
			scrub_submit(sctx);
	}

	/* Last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->sector_count; i++) {
			struct scrub_sector *sector = sbio->sectors[i];

			sector->io_error = 1;
			sector->sblock->no_io_error_seen = 0;
		}
	}

	/* Now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->sector_count; i++) {
		struct scrub_sector *sector = sbio->sectors[i];
		struct scrub_block *sblock = sector->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_sectors))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

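/*
 * Mark the sectors covered by the logical range [start, start + len) in the
 * given parity bitmap. The range is mapped relative to sparity->logic_start
 * and may wrap around within the stripe.
 */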
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u32 len)
{
	u64 offset;
	u32 nsectors;
	u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = offset >> sectorsize_bits;
	nsectors = len >> sectorsize_bits;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u32 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u32 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

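/*
 * Called once all sectors of a block have completed IO: verify the block,
 * trigger repair or dev-replace writeback as needed, and record any
 * corruption in the parity error bitmap.
 */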
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If the block has a checksum error it is written via the
		 * repair machinery; otherwise, in the dev-replace case,
		 * write it out to the target device here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->sectors[0]->logical;
		u64 end = sblock->sectors[sblock->sector_count - 1]->logical +
			  sblock->sctx->fs_info->sectorsize;

		ASSERT(end - start <= U32_MAX);
		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
{
	sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
	list_del(&sum->list);
	kfree(sum);
}

/*
 * Find the desired csum for range [logical, logical + sectorsize), and store
 * the csum into @csum.
 *
 * The search source is sctx->csum_list, which is a pre-populated list
 * storing bytenr ordered csum ranges.  We're responsible for cleaning up any
 * range that is before @logical.
 *
 * Return 0 if there is no csum for the range.
 * Return 1 if there is csum for the range and copied to @csum.
 */
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	bool found = false;

	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum = NULL;
		unsigned long index;
		unsigned long num_sectors;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		/* The current csum range is beyond our range, no csum found */
		if (sum->bytenr > logical)
			break;

		/*
		 * The current sum is before our bytenr, since scrub is always
		 * done in bytenr order, the csum will never be used anymore,
		 * clean it up so that later calls won't bother with the range,
		 * and continue search the next range.
		 */
		if (sum->bytenr + sum->len <= logical) {
			drop_csum_range(sctx, sum);
			continue;
		}

		/* Now the csum range covers our bytenr, copy the csum */
		found = true;
		index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
		num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;

		memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
		       sctx->fs_info->csum_size);

		/* Cleanup the range if we're at the end of the csum range */
		if (index == num_sectors - 1)
			drop_csum_range(sctx, sum);
		break;
	}
	if (!found)
		return 0;
	return 1;
}
/* scrub extent tries to collect up to 64 kB for each bio */
L
Liu Bo 已提交
2511
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2512
			u64 logical, u32 len,
2513
			u64 physical, struct btrfs_device *dev, u64 flags,
2514
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
A
Arne Jansen 已提交
2515 2516 2517
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
2518 2519 2520
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
L
Liu Bo 已提交
2521 2522 2523 2524
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
2525 2526 2527 2528
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
2529
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
L
Liu Bo 已提交
2530 2531 2532 2533
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
2534 2535 2536 2537
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
2538
	} else {
2539
		blocksize = sctx->fs_info->sectorsize;
2540
		WARN_ON(1);
2541
	}
A
Arne Jansen 已提交
2542 2543

	while (len) {
2544
		u32 l = min(len, blocksize);
A
Arne Jansen 已提交
2545 2546 2547 2548
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
2549
			have_csum = scrub_find_csum(sctx, logical, csum);
A
Arne Jansen 已提交
2550
			if (have_csum == 0)
2551
				++sctx->stat.no_csum;
A
Arne Jansen 已提交
2552
		}
2553
		ret = scrub_sectors(sctx, logical, l, physical, dev, flags, gen,
2554
				  mirror_num, have_csum ? csum : NULL,
2555
				  physical_for_dev_replace);
A
Arne Jansen 已提交
2556 2557 2558 2559 2560
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
2561
		physical_for_dev_replace += l;
A
Arne Jansen 已提交
2562 2563 2564 2565
	}
	return 0;
}

static int scrub_sectors_for_parity(struct scrub_parity *sparity,
				  u64 logical, u32 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int index;

	ASSERT(IS_ALIGNED(len, sectorsize));

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/*
	 * One ref inside this function, plus one for each page added to
	 * a bio later on.
	 */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_sector *sector;

		sector = kzalloc(sizeof(*sector), GFP_KERNEL);
		if (!sector) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
		/* For scrub block */
		scrub_sector_get(sector);
		sblock->sectors[index] = sector;
		/* For scrub parity */
		scrub_sector_get(sector);
		list_add_tail(&sector->list, &sparity->sectors_list);
		sector->sblock = sblock;
		sector->dev = dev;
		sector->flags = flags;
		sector->generation = gen;
		sector->logical = logical;
		sector->physical = physical;
		sector->mirror_num = mirror_num;
		if (csum) {
			sector->have_csum = 1;
			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
		} else {
			sector->have_csum = 0;
		}
		sblock->sector_count++;
		sector->page = alloc_page(GFP_KERNEL);
		if (!sector->page)
			goto leave_nomem;

		/* Iterate over the stripe range in sectorsize steps */
		len -= sectorsize;
		logical += sectorsize;
		physical += sectorsize;
	}

	WARN_ON(sblock->sector_count == 0);
	for (index = 0; index < sblock->sector_count; index++) {
		struct scrub_sector *sector = sblock->sectors[index];
		int ret;

		ret = scrub_add_sector_to_rd_bio(sctx, sector);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* Last one frees, either here or in bio completion for last sector */
	scrub_block_put(sblock);
	return 0;
}

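/*
 * Scrub an extent that falls into a parity stripe: walk it in blocksize
 * steps, look up the data csums and queue the sectors for reading. Data
 * without a csum is skipped.
 */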
static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u32 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u32 l = min(len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the leftmost data stripe's
 * logical offset.
 *
 * Return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, data_stripes);

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* Calculate which stripe this data is located on */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

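/*
 * Free a scrub_parity structure. Any sectors still marked in the error
 * bitmap are accounted as unrecoverable read errors before the sector
 * references are dropped.
 */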
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_sector *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) {
		list_del_init(&curr->list);
		scrub_sector_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_status)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
			NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}

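/*
 * Hand the data-sector bitmap (minus the sectors that already saw errors)
 * to the raid56 layer, which reads the stripe, verifies the parity and
 * rewrites it if it does not match.
 */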
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_io_context *bioc = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bioc);
	if (ret || !bioc || !bioc->raid_map)
		goto bioc_out;

	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, length,
					      sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bioc_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bioc(bioc);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

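/*
 * Scrub one RAID5/6 full stripe: mark which sectors hold data, read and
 * verify those extents, and let the raid56 layer check and repair the
 * parity once the last reference to the scrub_parity is dropped.
 */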
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, logic_start);
	struct btrfs_root *csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_path *path;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	/* Check the comment in scrub_stripe() for why u32 is enough here */
	u32 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ASSERT(map->stripe_len <= U32_MAX);
	nsectors = map->stripe_len >> fs_info->sectorsize_bits;
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_free_path(path);
		return -ENOMEM;
	}

	ASSERT(map->stripe_len <= U32_MAX);
	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->sectors_list);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			ASSERT(bytes <= U32_MAX);
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bioc = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					extent_logical, &mapped_length, &bioc,
					0);
			if (!ret) {
				if (!bioc || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bioc(bioc);
				goto out;
			}
			extent_physical = bioc->stripes[0].physical;
			extent_mirror_num = bioc->mirror_num;
			extent_dev = bioc->stripes[0].dev;
			btrfs_put_bioc(bioc);

			csum_root = btrfs_csum_root(fs_info, extent_logical);
			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0) {
		ASSERT(logic_end - logic_start <= U32_MAX);
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	}
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

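/*
 * For dev-replace on a zoned filesystem, flush all queued reads and writes
 * and wait for them so the copy on the target device keeps up with the
 * scrub position.
 */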
static void sync_replace_for_zoned(struct scrub_ctx *sctx)
{
	if (!btrfs_is_zoned(sctx->fs_info))
		return;

	sctx->flush_all_writes = true;
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
}

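/*
 * After finishing a stripe on a zoned filesystem, advance the write pointer
 * of the target device zone to match how far the copy got.
 */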
static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
					u64 physical, u64 physical_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int ret = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	mutex_lock(&sctx->wr_lock);
	if (sctx->write_pointer < physical_end) {
		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
						    physical,
						    sctx->write_pointer);
		if (ret)
			btrfs_err(fs_info,
				  "zoned: failed to recover write pointer");
	}
	mutex_unlock(&sctx->wr_lock);
	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);

	return ret;
}

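/*
 * Scrub one device stripe of a chunk: walk the commit-root extent tree
 * stripe by stripe, collect data csums and queue every extent for
 * verification. RAID5/6 parity stripes are handed to scrub_raid56_parity().
 */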
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct btrfs_block_group *bg,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int stripe_index, u64 dev_extent_len)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	const u64 chunk_logical = bg->start;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct btrfs_key key;
	u64 increment;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	/*
	 * Unlike chunk length, extent length should never go beyond
	 * BTRFS_MAX_EXTENT_SIZE, thus u32 is enough here.
	 */
	u32 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[stripe_index].physical;
	offset = 0;
	nstripes = div64_u64(dev_extent_len, map->stripe_len);
	mirror_num = 1;
	increment = map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * stripe_index;
		increment = map->stripe_len * map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (stripe_index / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = stripe_index % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		mirror_num = stripe_index % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		mirror_num = stripe_index % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, stripe_index, map, &offset,
					NULL);
		increment = map->stripe_len * nr_data_stripes(map);
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Work on the commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite them to
	 * repair disk errors without any race conditions.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->reada = READA_FORWARD;

	logical = chunk_logical + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, stripe_index,
					map, &logic_end, NULL);
		logic_end += chunk_logical;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	root = btrfs_extent_root(fs_info, logical);
	csum_root = btrfs_csum_root(fs_info, logical);

	/*
	 * Collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB.
	 */
	blk_start_plug(&plug);

	if (sctx->is_dev_replace &&
	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
		mutex_lock(&sctx->wr_lock);
		sctx->write_pointer = physical;
		mutex_unlock(&sctx->wr_lock);
		sctx->flush_all_writes = true;
	}

	/*
	 * Now find all extents for each stripe and scrub them.
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, stripe_index,
						      map, &logical,
						      &stripe_logical);
			logical += chunk_logical;
			if (ret) {
				/* It is a parity stripe */
				stripe_logical += chunk_logical;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/*
				 * There's no smaller item, so stick with the
				 * larger one.
				 */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			/*
			 * If our block group was removed in the meanwhile, just
			 * stop scrubbing since there is no point in continuing.
			 * Continuing would prevent reusing its device extents
			 * for new block groups for a long time.
			 */
			spin_lock(&bg->lock);
			if (bg->removed) {
				spin_unlock(&bg->lock);
				ret = 0;
				goto out;
			}
			spin_unlock(&bg->lock);

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			ASSERT(bytes <= U32_MAX);
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (sctx->is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			if (flags & BTRFS_EXTENT_FLAG_DATA) {
				ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
				if (ret)
					goto out;
			}

			ret = scrub_extent(sctx, map, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (sctx->is_dev_replace)
				sync_replace_for_zoned(sctx);

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							stripe_index, map,
							&logical, &stripe_logical);
					logical += chunk_logical;

					if (ret && physical < physical_end) {
						stripe_logical += chunk_logical;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[stripe_index].physical +
						   dev_extent_len;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);

	if (sctx->is_dev_replace && ret >= 0) {
		int ret2;

		ret2 = sync_write_pointer_for_zoned(sctx,
				chunk_logical + offset,
				map->stripes[stripe_index].physical,
				physical_end);
		if (ret2)
			ret = ret2;
	}

	return ret < 0 ? ret : 0;
}

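/*
 * Find the chunk that backs the device extent at @dev_offset and scrub all of
 * its stripes that live on @scrub_dev.
 */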
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_block_group *bg,
					  struct btrfs_device *scrub_dev,
					  u64 dev_offset,
					  u64 dev_extent_len)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, bg->start, bg->length);
	read_unlock(&map_tree->lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&bg->lock);
		if (!bg->removed)
			ret = -EINVAL;
		spin_unlock(&bg->lock);

		return ret;
	}
	if (em->start != bg->start)
		goto out;
	if (em->len < dev_extent_len)
		goto out;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, bg, map, scrub_dev, i,
					   dev_extent_len);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

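/*
 * For dev-replace on zoned filesystems, flush and commit any outstanding
 * extent writes to the block group before scrubbing it; a no-op on non-zoned
 * filesystems.
 */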
static int finish_extent_writes_for_zoned(struct btrfs_root *root,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_wait_block_group_reservations(cache);
	btrfs_wait_nocow_writers(cache);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}

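/*
 * Walk all device extents of @scrub_dev within [start, end) and scrub the
 * corresponding chunks one block group at a time.
 */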
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		u64 dev_extent_len;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + dev_extent_len <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		ASSERT(cache->start <= chunk_offset);
		/*
		 * We are using the commit root to search for device extents, so
		 * that means we could have found a device extent item from a
		 * block group that was deleted in the current transaction. The
		 * logical start offset of the deleted block group, stored at
		 * @chunk_offset, might be part of the logical address range of
		 * a new block group (which uses different physical extents).
		 * In this case btrfs_lookup_block_group() has returned the new
		 * block group, and its start address is less than @chunk_offset.
		 *
		 * We skip such new block groups, because it's pointless to
		 * process them, as we won't find their extents because we search
		 * for them using the commit root of the extent tree. For a device
		 * replace it's also fine to skip it, we won't miss copying them
		 * to the target device because we have the write duplication
		 * setup through the regular write path (by btrfs_map_block()),
		 * and we have committed a transaction when we started the device
		 * replace, right after setting up the device replace state.
		 */
		if (cache->start < chunk_offset) {
			btrfs_put_block_group(cache);
			goto skip;
		}

		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
			spin_lock(&cache->lock);
			if (!cache->to_copy) {
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto skip;
			}
			spin_unlock(&cache->lock);
		}

		/*
		 * Make sure that while we are scrubbing the corresponding block
		 * group doesn't get its logical address and its device extents
		 * reused for another block group, which can possibly be of a
		 * different type and different profile. We do this to prevent
		 * false error detections and crashes due to bogus attempts to
		 * repair extents.
		 */
		spin_lock(&cache->lock);
		if (cache->removed) {
			spin_unlock(&cache->lock);
			btrfs_put_block_group(cache);
			goto skip;
		}
		btrfs_freeze_block_group(cache);
		spin_unlock(&cache->lock);

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);

		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bgs are small, that's pretty common.
		 * 2. A new SYSTEM bg gets allocated, since the regular
		 *    (preallocating) version would allocate a new chunk.
		 * 3. The new SYSTEM bg is empty and will get cleaned up.
		 *    Before the cleanup really happens, it's marked RO again.
		 * 4. The empty SYSTEM bg gets scrubbed.
		 *    We go back to 2.
		 *
		 * This can easily boost the number of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up
		 * all the space of btrfs_super_block::sys_chunk_array.
		 *
		 * For dev replace, on the other hand, we need to try our best
		 * to mark the block group RO, to prevent a race between:
		 * - Write duplication
		 *   Contains the latest data.
		 * - Scrub copy
		 *   Contains data from the commit tree.
		 *
		 * If the target block group is not marked RO, nocow writes can
		 * be overwritten by the scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
		if (!ret && sctx->is_dev_replace) {
			ret = finish_extent_writes_for_zoned(root, cache);
			if (ret) {
				btrfs_dec_block_group_ro(cache);
				scrub_pause_off(fs_info);
				btrfs_put_block_group(cache);
				break;
			}
		}

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * fails to create a new chunk for metadata. That is
			 * not a problem for scrub, because metadata is always
			 * cowed and our scrub is paused across transaction
			 * commits.
			 */
			ro_set = 0;
		} else if (ret == -ETXTBSY) {
			btrfs_warn(fs_info,
		   "skipping scrub of block group %llu due to active swapfile",
				   cache->start);
			scrub_pause_off(fs_info);
			ret = 0;
			goto skip_unfreeze;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block is marked RO, wait for nocow writes to
		 * finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in commit tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
					cache->length);
		}

		scrub_pause_off(fs_info);
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + dev_extent_len;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
				  dev_extent_len);

		/*
		 * Flush and submit all pending read and write bios, and
		 * afterwards wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * Must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		if (sctx->is_dev_replace &&
		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
						      cache, found_key.offset))
			ro_set = 0;

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}
skip_unfreeze:
		btrfs_unfreeze_block_group(cache);
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + dev_extent_len;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

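/*
 * Scrub all superblock copies of @scrub_dev that fall within its committed
 * size, then wait for the resulting I/O to complete.
 */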
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;
		if (!btrfs_check_super_location(scrub_dev, bytenr))
			continue;

		ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				    scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				    NULL, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

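/*
 * Drop a reference on the scrub workqueues; the last holder unhooks them from
 * fs_info and destroys them.
 */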
static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
					&fs_info->scrub_lock)) {
		struct btrfs_workqueue *scrub_workers = NULL;
		struct btrfs_workqueue *scrub_wr_comp = NULL;
		struct btrfs_workqueue *scrub_parity = NULL;

		scrub_workers = fs_info->scrub_workers;
		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
		scrub_parity = fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
		mutex_unlock(&fs_info->scrub_lock);

		btrfs_destroy_workqueue(scrub_workers);
		btrfs_destroy_workqueue(scrub_wr_comp);
		btrfs_destroy_workqueue(scrub_parity);
	}
}

/*
 * Get a reference count on fs_info->scrub_workers; start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	struct btrfs_workqueue *scrub_workers = NULL;
	struct btrfs_workqueue *scrub_wr_comp = NULL;
	struct btrfs_workqueue *scrub_parity = NULL;
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;
	int ret = -ENOMEM;

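	/* Fast path: the workqueues already exist, just take a reference. */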
	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
		return 0;

	scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
					      is_dev_replace ? 1 : max_active, 4);
	if (!scrub_workers)
		goto fail_scrub_workers;

	scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
	if (!scrub_wr_comp)
		goto fail_scrub_wr_completion_workers;

	scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					     max_active, 2);
	if (!scrub_parity)
		goto fail_scrub_parity_workers;

	mutex_lock(&fs_info->scrub_lock);
	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL &&
		       fs_info->scrub_wr_completion_workers == NULL &&
		       fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_workers = scrub_workers;
		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
		fs_info->scrub_parity_workers = scrub_parity;
		refcount_set(&fs_info->scrub_workers_refcnt, 1);
		mutex_unlock(&fs_info->scrub_lock);
		return 0;
	}
	/* Other thread raced in and created the workers for us */
	refcount_inc(&fs_info->scrub_workers_refcnt);
	mutex_unlock(&fs_info->scrub_lock);

	ret = 0;
	btrfs_destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
	btrfs_destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(scrub_workers);
fail_scrub_workers:
	return ret;
}

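/*
 * Entry point for both scrub and device replace: scrub the given range of the
 * device identified by @devid and report the final statistics via @progress.
 */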
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
		       fs_info->nodesize,
		       BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_SECTORS_PER_BLOCK) {
		/*
		 * Would exhaust the array bounds of the sectors member in
		 * struct scrub_block.
		 */
		btrfs_err(fs_info,
"scrub: nodesize and sectorsize <= SCRUB_MAX_SECTORS_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->nodesize, SCRUB_MAX_SECTORS_PER_BLOCK,
		       fs_info->sectorsize, SCRUB_MAX_SECTORS_PER_BLOCK);
		return -EINVAL;
	}

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		goto out_free_ctx;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info,
			"scrub on devid %llu: filesystem on %s is not writable",
				 devid, rcu_str_deref(dev->name));
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out;
	}
	up_read(&fs_info->dev_replace.rwsem);

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a
	 * race between committing a transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * By holding the device list mutex, we can
		 * kick off writing the super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
4155
		ret = scrub_supers(sctx, dev);
4156
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_workers_put(fs_info);
	scrub_put_ctx(sctx);

	return ret;
out:
	scrub_workers_put(fs_info);
out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}

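/*
 * Ask all running scrubs to pause and wait until every one of them has
 * reached its pause point, e.g. so that a transaction commit can make
 * progress without scrub getting in the way.
 */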
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

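/*
 * Cancel the scrub running on @dev and wait until it has actually stopped.
 */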
int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

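/*
 * Report the current scrub statistics for @devid without waiting for a
 * running scrub to finish.
 */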
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

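/*
 * For dev-replace, redirect the read of an extent to the physical location of
 * the first stripe that btrfs_map_block() returns, so the data to be copied
 * is taken from a live mirror.
 */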
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u32 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_io_context *bioc = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bioc, 0);
	if (ret || !bioc || mapped_length < extent_len ||
	    !bioc->stripes[0].dev->bdev) {
		btrfs_put_bioc(bioc);
		return;
	}

	*extent_physical = bioc->stripes[0].physical;
	*extent_mirror_num = bioc->mirror_num;
	*extent_dev = bioc->stripes[0].dev;
	btrfs_put_bioc(bioc);
}