// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following two values only influence the performance.
 *
 * The first one configures an upper limit for the number of (dynamically
 * allocated) pages that are added to a bio. The second one configures the
 * number of parallel and outstanding I/O operations.
 */
#define SCRUB_PAGES_PER_BIO	32	/* 128KiB per bio for x86 */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for x86 */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
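/* E.g. with the 64KiB maximum metadata blocksize this evaluates to 16. */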

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_io_context	*bioc;
	u64			map_length;
};

struct scrub_sector {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	u8			mirror_num;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
	struct scrub_sector	*pagev[SCRUB_PAGES_PER_BIO];
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_sector	*sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
	int			sector_count;
	atomic_t		outstanding_sectors;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u32			stripe_len;

	refcount_t		refs;

	struct list_head	sectors_list;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[];
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_bio;

	/* State of IO submission throttling affecting the associated device */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	int			is_dev_replace;
	u64			write_pointer;

	struct scrub_bio        *wr_curr_bio;
	struct mutex            wr_lock;
	struct btrfs_device     *wr_tgtdev;
	bool                    flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t              refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int sector_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
					     int sector_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_sector_get(struct scrub_sector *sector);
static void scrub_sector_put(struct scrub_sector *sector);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
			 u64 physical, struct btrfs_device *dev, u64 flags,
			 u64 gen, int mirror_num, u8 *csum,
			 u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u32 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
{
	return sector->recover &&
	       (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
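
/*
 * A note on the pause protocol above: scrub_pause_on() advertises this scrub
 * as paused and wakes any waiters, while scrub_pause_off() waits until the
 * pause request is gone before marking the scrub as running again. Calling
 * them back to back, as done here, is a cheap "yield to a pending pause
 * request" point.
 */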

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/*
	 * Insert new lock.
	 */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}
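
/*
 * Worked example for the manual rounding above (hypothetical numbers): a
 * RAID5 chunk with 3 data stripes has full_stripe_len = 3 * 64KiB = 192KiB,
 * which is not a power of two. With bytenr = cache->start + 500KiB, the
 * division yields 500KiB / 192KiB = 2, so the returned full stripe start is
 * cache->start + 384KiB.
 */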

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * The caller must then call unlock_full_stripe() from the same context.
 *
 * Return <0 on error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlocked the full stripe without problem.
 * Return <0 on error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
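
/*
 * Usage sketch for the pair above (this mirrors how
 * scrub_handle_errored_block() uses it):
 *
 *	bool full_stripe_locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 *	if (ret < 0)
 *		return ret;
 *	(... recover or rewrite sectors inside the full stripe ...)
 *	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
 */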

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_bio = SCRUB_PAGES_PER_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
				NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	sctx->throttle_deadline = 0;

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->sector_count < 1);
	dev = sblock->sectors[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->sectors[0]->physical;
	swarn.logical = sblock->sectors[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bioc(recover->bioc);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all sectors in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int sector_num;
	int success;
	bool full_stripe_locked;
	unsigned int nofs_flag;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->sector_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->sectors[0]->logical;
	BUG_ON(sblock_to_check->sectors[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->sectors[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->sectors[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->sectors[0]->have_csum;
	dev = sblock_to_check->sectors[0]->dev;

	if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
		return 0;

	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait
	 * for all the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of
	 * this function.
	 */
	nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, race can happen for a different device scrub thread.
	 * For data corruption, Parity and Data threads will both try
	 * to recover the data.
	 * Race can lead to doubly added csum error, or even unrecoverable
	 * error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		memalloc_nofs_restore(nofs_flag);
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * Read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * that this fixup code is called) another time, sector by sector
	 * this time in order to know which sectors caused I/O errors and
	 * which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * sectors from those mirrors without I/O error on the
	 * particular sectors. One example (with blocks >= 2 * sectorsize)
	 * would be that mirror #1 has an I/O error on the first sector,
	 * the second sector is good, and mirror #2 has an I/O error on
	 * the second sector, but the first sector is good.
	 * Then the first sector of the first mirror can be repaired by
	 * taking the first sector of the second mirror, and the
	 * second sector of the second mirror can be repaired by
	 * copying the contents of the 2nd sector of the 1st mirror.
	 * One more note: if the sectors of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the sectors are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* Setup the context, map the logical blocks and alloc the sectors */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading sector by sector, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those sectors are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other sectors is better (and it
	 * could happen otherwise that a correct sector would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ;mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].sector_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->sectors[0]->recover;
			int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].sector_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->sectors[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those sectors.
	 * Select the good sectors from mirrors to rewrite bad sectors from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of sectors from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd sector of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on sectorsize. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same sectorsize
	 * area are unreadable.
	 */
	success = 1;
	for (sector_num = 0; sector_num < sblock_bad->sector_count;
	     sector_num++) {
		struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
		struct scrub_block *sblock_other = NULL;

		/* Skip no-io-error sectors in scrub */
		if (!sector_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (sector_bad->io_error) {
			/* Try to find no-io-error sector in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].sector_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    sectors[sector_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the sector from.
			 * scrub_write_sector_to_dev_replace() handles this
			 * case (sector->io_error), by filling the block with
			 * zeros before submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_sector_to_dev_replace(sblock_other,
							      sector_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_sector_from_good_copy(sblock_bad,
								 sblock_other,
								 sector_num, 0);
			if (ret == 0)
				sector_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int i;

			for (i = 0; i < sblock->sector_count; i++) {
				sblock->sectors[i]->sblock = NULL;
				recover = sblock->sectors[i]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->sectors[i]->recover = NULL;
				}
				scrub_sector_put(sblock->sectors[i]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		return ret;
	return 0;
}

static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
{
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bioc->num_stripes;
}
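
/*
 * A note on the counts above: they are the number of independent ways the
 * data can be obtained. RAID5 data can be read directly or rebuilt from the
 * parity stripe (two "mirrors"), and RAID6's second parity stripe adds one
 * more recovery combination. For the other profiles every stripe returned
 * by the mapping is a plain copy.
 */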

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
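
/*
 * For RAID5/6 the bioc's raid_map gives the logical start of every stripe in
 * the full stripe, with RAID5_P_STRIPE/RAID6_Q_STRIPE as placeholders for the
 * parity stripes, so the loop above finds the data stripe covering @logical
 * and the byte offset into it. For the other profiles, the mirror number
 * directly selects the stripe.
 */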

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
	u64 logical = original_sblock->sectors[0]->logical;
	u64 generation = original_sblock->sectors[0]->generation;
	u64 flags = original_sblock->sectors[0]->flags;
	u64 have_csum = original_sblock->sectors[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_io_context *bioc;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int sector_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * Note: the two members refs and outstanding_sectors are not used (and
	 * not set) in the blocks that are used for the recheck procedure.
	 */

	while (length > 0) {
		sublen = min_t(u64, length, fs_info->sectorsize);
		mapped_length = sublen;
		bioc = NULL;

		/*
		 * With a length of sectorsize, each returned stripe represents
		 * one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				       logical, &mapped_length, &bioc);
		if (ret || !bioc || mapped_length < sublen) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bioc = bioc;
		recover->map_length = mapped_length;

		ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_sector *sector;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			sector = kzalloc(sizeof(*sector), GFP_NOFS);
			if (!sector) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_sector_get(sector);
			sblock->sectors[sector_index] = sector;
			sector->sblock = sblock;
			sector->flags = flags;
			sector->generation = generation;
			sector->logical = logical;
			sector->have_csum = have_csum;
			if (have_csum)
				memcpy(sector->csum,
				       original_sblock->sectors[0]->csum,
				       sctx->fs_info->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bioc->map_type,
						      bioc->raid_map,
						      mapped_length,
						      bioc->num_stripes -
						      bioc->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			sector->physical = bioc->stripes[stripe_index].physical +
					 stripe_offset;
			sector->dev = bioc->stripes[stripe_index].dev;

			BUG_ON(sector_index >= original_sblock->sector_count);
			sector->physical_for_dev_replace =
				original_sblock->sectors[sector_index]->
				physical_for_dev_replace;
			/* For missing devices, dev->bdev is NULL */
			sector->mirror_num = mirror_index + 1;
			sblock->sector_count++;
			sector->page = alloc_page(GFP_NOFS);
			if (!sector->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			sector->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		sector_index++;
	}

	return 0;
}
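
/*
 * After the setup above, sblocks_for_recheck[m] describes mirror m of the
 * original block: one scrub_sector per sector, each pointing at that mirror's
 * physical location. The actual reading and checksum verification happens
 * later in scrub_recheck_block().
 */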

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_sector *sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	int mirror_num;

	bio->bi_iter.bi_sector = sector->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	mirror_num = sector->sblock->sectors[0]->mirror_num;
	ret = raid56_parity_recover(bio, sector->recover->bioc,
				    sector->recover->map_length,
				    mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}

static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_sector *first_sector = sblock->sectors[0];
	struct bio *bio;
	int i;

	/* All sectors in sblock belong to the same stripe on the same device. */
	ASSERT(first_sector->dev);
	if (!first_sector->dev->bdev)
		goto out;

	bio = btrfs_bio_alloc(BIO_MAX_VECS);
	bio_set_dev(bio, first_sector->dev->bdev);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];

		WARN_ON(!sector->page);
		bio_add_page(bio, sector->page, PAGE_SIZE, 0);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (i = 0; i < sblock->sector_count; i++)
		sblock->sectors[i]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * This function will check the on disk data for checksum errors, header errors
 * and read I/O errors. If any I/O errors happen, the exact sectors which are
 * errored are marked as being bad. The goal is to enable scrub to take those
 * sectors that are not errored from all the mirrors so that the sectors that
 * are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int i;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (i = 0; i < sblock->sector_count; i++) {
		struct bio *bio;
		struct scrub_sector *sector = sblock->sectors[i];

		if (sector->dev->bdev == NULL) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!sector->page);
		bio = btrfs_bio_alloc(1);
		bio_set_dev(bio, sector->dev->bdev);

		bio_add_page(bio, sector->page, fs_info->sectorsize, 0);
		bio->bi_iter.bi_sector = sector->physical >> 9;
		bio->bi_opf = REQ_OP_READ;

		if (btrfsic_submit_bio_wait(bio)) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
{
	struct btrfs_fs_devices *fs_devices = sector->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int i;
	int ret = 0;

	for (i = 0; i < sblock_bad->sector_count; i++) {
		int ret_sub;

		ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
							     sblock_good, i, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write)
{
	struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
	struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
	const u32 sectorsize = fs_info->sectorsize;

	BUG_ON(sector_bad->page == NULL);
	BUG_ON(sector_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || sector_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!sector_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_bio_alloc(1);
		bio_set_dev(bio, sector_bad->dev->bdev);
		bio->bi_iter.bi_sector = sector_bad->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;

		ret = bio_add_page(bio, sector_good->page, sectorsize, 0);
		if (ret != sectorsize) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(sector_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int i;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (i = 0; i < sblock->sector_count; i++) {
		int ret;

		ret = scrub_write_sector_to_dev_replace(sblock, i);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}

static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
{
	struct scrub_sector *sector = sblock->sectors[sector_num];

	BUG_ON(sector->page == NULL);
	if (sector->io_error)
		clear_page(page_address(sector->page));

	return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
}

static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
	int ret = 0;
	u64 length;

	if (!btrfs_is_zoned(sctx->fs_info))
		return 0;

	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
		return 0;

	if (sctx->write_pointer < physical) {
		length = physical - sctx->write_pointer;

		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
						sctx->write_pointer, length);
		if (!ret)
			sctx->write_pointer = physical;
	}
	return ret;
}
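
/*
 * Example with hypothetical numbers: on a zoned target whose write pointer
 * sits at 4MiB while the next sector must land at 4MiB + 64KiB, the 64KiB
 * gap is zeroed out first, so the zone is still written strictly
 * sequentially, and sctx->write_pointer catches up to the new position.
 */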

static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_bio *sbio;
	int ret;
	const u32 sectorsize = sctx->fs_info->sectorsize;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->page_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		ret = fill_writer_pointer_gap(sctx, sector->physical_for_dev_replace);
		if (ret) {
			mutex_unlock(&sctx->wr_lock);
			return ret;
		}

		sbio->physical = sector->physical_for_dev_replace;
		sbio->logical = sector->logical;
		sbio->dev = sctx->wr_tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_bio_alloc(sctx->pages_per_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * sectorsize !=
		   sector->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * sectorsize !=
		   sector->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
	if (ret != sectorsize) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = sector;
	scrub_sector_get(sector);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/*
	 * Process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver, which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5.
	 */
	btrfsic_submit_bio(sbio->bio);

	if (btrfs_is_zoned(sctx->fs_info))
		sctx->write_pointer = sbio->physical + sbio->page_count *
			sctx->fs_info->sectorsize;
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	ASSERT(sbio->page_count <= SCRUB_PAGES_PER_BIO);
	if (sbio->status) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_sector *sector = sbio->pagev[i];

			sector->io_error = 1;
			atomic64_inc(&dev_replace->num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_sector_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently, because this
	 * function only uses the return value instead of these stats.
	 *
	 * TODO: always use the stats.
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->sector_count < 1);
	flags = sblock->sectors[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 csum[BTRFS_CSUM_SIZE];
	struct scrub_sector *sector;
	char *kaddr;

	BUG_ON(sblock->sector_count < 1);
	sector = sblock->sectors[0];
	if (!sector->have_csum)
		return 0;

	kaddr = page_address(sector->page);

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);

	/*
	 * In scrub_sectors() and scrub_sectors_for_parity() we ensure each sector
	 * only contains one sector of data.
	 */
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);

	if (memcmp(csum, sector->csum, fs_info->csum_size))
		sblock->checksum_error = 1;
	return sblock->checksum_error;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	/*
	 * This is done in sectorsize steps even for metadata as there's a
	 * constraint for nodesize to be aligned to sectorsize. This will need
	 * to change so we don't misuse data and metadata units like that.
	 */
	const u32 sectorsize = sctx->fs_info->sectorsize;
	const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
	int i;
	struct scrub_sector *sector;
	char *kaddr;

	BUG_ON(sblock->sector_count < 1);

	/* Each member in sectors is just one sector */
	ASSERT(sblock->sector_count == num_sectors);

	sector = sblock->sectors[0];
	kaddr = page_address(sector->page);
	h = (struct btrfs_header *)kaddr;
	memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sector->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sector->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sector))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

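	/*
	 * The metadata checksum covers everything in the node after the
	 * csum field itself: feed the first sector starting past
	 * BTRFS_CSUM_SIZE, then the remaining sectors in full.
	 */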
	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    sectorsize - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_sectors; i++) {
		kaddr = page_address(sblock->sectors[i]->page);
		crypto_shash_update(shash, kaddr, sectorsize);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct scrub_sector *sector;
	char *kaddr;
	int fail_gen = 0;
	int fail_cor = 0;

	BUG_ON(sblock->sector_count < 1);
	sector = sblock->sectors[0];
	kaddr = page_address(sector->page);
	s = (struct btrfs_super_block *)kaddr;

	if (sector->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sector->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sector))
		++fail_cor;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
			BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);

	if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * If we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sector->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sector->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->sector_count; i++)
			scrub_sector_put(sblock->sectors[i]);
		kfree(sblock);
	}
}

static void scrub_sector_get(struct scrub_sector *sector)
{
	atomic_inc(&sector->refs);
}

static void scrub_sector_put(struct scrub_sector *sector)
{
	if (atomic_dec_and_test(&sector->refs)) {
		if (sector->page)
			__free_page(sector->page);
		kfree(sector);
	}
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second.  Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
 */
static void scrub_throttle(struct scrub_ctx *sctx)
{
	const int time_slice = 1000;
	struct scrub_bio *sbio;
	struct btrfs_device *device;
	s64 delta;
	ktime_t now;
	u32 div;
	u64 bwlimit;

	sbio = sctx->bios[sctx->curr];
	device = sbio->dev;
	bwlimit = READ_ONCE(device->scrub_speed_max);
	if (bwlimit == 0)
		return;

	/*
	 * Slice is divided into intervals when the IO is submitted, adjust by
	 * bwlimit and maximum of 64 intervals.
	 */
	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
	div = min_t(u32, 64, div);
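	/*
	 * Illustrative example (numbers made up): bwlimit = 64MiB/s gives
	 * div = min(64, 64/16) = 4, i.e. four 250ms intervals, each allowed
	 * to submit up to bwlimit / div = 16MiB before sleeping out the
	 * rest of its interval.
	 */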

	/* Start new epoch, set deadline */
	now = ktime_get();
	if (sctx->throttle_deadline == 0) {
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	/* Still in the time to send? */
	if (ktime_before(now, sctx->throttle_deadline)) {
		/* If current bio is within the limit, send it */
		sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;

		/* We're over the limit, sleep until the rest of the slice */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		/* New request after deadline, start new epoch */
		delta = 0;
	}

	if (delta) {
		long timeout;

		timeout = div_u64(delta * HZ, 1000);
		schedule_timeout_interruptible(timeout);
	}

	/* Next call will start the deadline period */
	sctx->throttle_deadline = 0;
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	scrub_throttle(sctx);

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}

static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_block *sblock = sector->sblock;
	struct scrub_bio *sbio;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = sector->physical;
		sbio->logical = sector->logical;
		sbio->dev = sector->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_bio_alloc(sctx->pages_per_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_READ;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * sectorsize !=
		   sector->physical ||
		   sbio->logical + sbio->page_count * sectorsize !=
		   sector->logical ||
		   sbio->dev != sector->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = sector;
	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
	if (ret != sectorsize) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_sectors);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_bio)
		scrub_submit(sctx);

	return 0;
}

static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_status)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->sectors[0]->logical;
	dev = sblock->sectors[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_block_put(sblock);
	scrub_pending_bio_dec(sctx);
}

static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->sector_count << fs_info->sectorsize_bits;
	u64 logical = sblock->sectors[0]->logical;
	struct btrfs_io_context *bioc = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &length, &bioc);
	if (ret || !bioc || !bioc->raid_map)
		goto bioc_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bioc_out;
	}

	bio = btrfs_bio_alloc(BIO_MAX_VECS);
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(bio, bioc, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];

		raid56_add_scrub_pages(rbio, sector->page, sector->logical);
	}

	btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bioc_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bioc(bioc);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/*
	 * One ref inside this function, plus one for each page added to
	 * a bio later on.
	 */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_sector *sector;
		/*
		 * Here we will allocate one page for one sector to scrub.
		 * This is fine if PAGE_SIZE == sectorsize, but will cost
		 * more memory for PAGE_SIZE > sectorsize case.
		 */
		u32 l = min(sectorsize, len);

		sector = kzalloc(sizeof(*sector), GFP_KERNEL);
		if (!sector) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
		scrub_sector_get(sector);
		sblock->sectors[index] = sector;
		sector->sblock = sblock;
		sector->dev = dev;
		sector->flags = flags;
		sector->generation = gen;
		sector->logical = logical;
		sector->physical = physical;
		sector->physical_for_dev_replace = physical_for_dev_replace;
		sector->mirror_num = mirror_num;
		if (csum) {
			sector->have_csum = 1;
			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
		} else {
			sector->have_csum = 0;
		}
		sblock->sector_count++;
		sector->page = alloc_page(GFP_KERNEL);
		if (!sector->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->sector_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->sector_count; index++) {
			struct scrub_sector *sector = sblock->sectors[index];
			int ret;

			ret = scrub_add_sector_to_rd_bio(sctx, sector);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (flags & BTRFS_EXTENT_FLAG_SUPER)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	ASSERT(sbio->page_count <= SCRUB_PAGES_PER_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_sector *sector = sbio->pagev[i];

			sector->io_error = 1;
			sector->sblock->no_io_error_seen = 0;
		}
	}

	/* Now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_sector *sector = sbio->pagev[i];
		struct scrub_block *sblock = sector->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_sectors))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

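/*
 * Mark the sectors covered by [start, start + len) in the given
 * per-stripe bitmap.  The bitmap spans exactly one stripe_len, so a
 * range crossing the end of the stripe wraps around to the front.
 * Illustrative example (made-up numbers): with nsectors = 16, a range
 * starting at sector 12 that is 8 sectors long sets bits 12-15 and
 * then bits 0-3.
 */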
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u32 len)
{
	u64 offset;
	u32 nsectors;
	u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = offset >> sectorsize_bits;
	nsectors = len >> sectorsize_bits;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u32 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u32 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If the block has a checksum error, it is written out via
		 * the repair mechanism in the dev-replace case; otherwise
		 * write it to the replace target here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
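		/*
		 * Record the corrupted range in the parity error bitmap so
		 * that the RAID5/6 parity check can later rebuild it or
		 * account it as unrecoverable.
		 */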
		u64 start = sblock->sectors[0]->logical;
		u64 end = sblock->sectors[sblock->sector_count - 1]->logical +
			  sblock->sctx->fs_info->sectorsize;

		ASSERT(end - start <= U32_MAX);
		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
{
	sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
	list_del(&sum->list);
	kfree(sum);
}

/*
 * Find the desired csum for range [logical, logical + sectorsize), and store
 * the csum into @csum.
 *
 * The search source is sctx->csum_list, which is a pre-populated list
 * storing bytenr ordered csum ranges.  We are responsible for cleaning up
 * any range that is before @logical.
 *
 * Return 0 if there is no csum for the range.
 * Return 1 if there is csum for the range and copied to @csum.
 */
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	bool found = false;

	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum = NULL;
		unsigned long index;
		unsigned long num_sectors;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		/* The current csum range is beyond our range, no csum found */
		if (sum->bytenr > logical)
			break;

		/*
		 * The current sum is before our bytenr, since scrub is always
		 * done in bytenr order, the csum will never be used anymore,
		 * clean it up so that later calls won't bother with the range,
		 * and continue search the next range.
		 */
		if (sum->bytenr + sum->len <= logical) {
			drop_csum_range(sctx, sum);
			continue;
		}

		/* Now the csum range covers our bytenr, copy the csum */
		found = true;
		index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
		num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;

		memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
		       sctx->fs_info->csum_size);

		/* Cleanup the range if we're at the end of the csum range */
		if (index == num_sectors - 1)
			drop_csum_range(sctx, sum);
		break;
	}
	if (!found)
		return 0;
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
			u64 logical, u32 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u32 l = min(len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_sectors(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL,
				  physical_for_dev_replace);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

static int scrub_sectors_for_parity(struct scrub_parity *sparity,
				  u64 logical, u32 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int index;

	ASSERT(IS_ALIGNED(len, sectorsize));

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/*
	 * One ref inside this function, plus one for each page added to
	 * a bio later on.
	 */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_sector *sector;

		sector = kzalloc(sizeof(*sector), GFP_KERNEL);
		if (!sector) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
		/* For scrub block */
		scrub_sector_get(sector);
		sblock->sectors[index] = sector;
		/* For scrub parity */
		scrub_sector_get(sector);
		list_add_tail(&sector->list, &sparity->sectors_list);
		sector->sblock = sblock;
		sector->dev = dev;
		sector->flags = flags;
		sector->generation = gen;
		sector->logical = logical;
		sector->physical = physical;
		sector->mirror_num = mirror_num;
		if (csum) {
			sector->have_csum = 1;
			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
		} else {
			sector->have_csum = 0;
		}
		sblock->sector_count++;
		sector->page = alloc_page(GFP_KERNEL);
		if (!sector->page)
			goto leave_nomem;

		/* Iterate over the stripe range in sectorsize steps */
		len -= sectorsize;
		logical += sectorsize;
		physical += sectorsize;
	}

	WARN_ON(sblock->sector_count == 0);
	for (index = 0; index < sblock->sector_count; index++) {
		struct scrub_sector *sector = sblock->sectors[index];
		int ret;

		ret = scrub_add_sector_to_rd_bio(sctx, sector);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* Last one frees, either here or in bio completion for last sector */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u32 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u32 l = min(len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the left-most data
 * stripe's logical offset.
 *
 * Return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, data_stripes);

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data is located on */
		rot += i;
		stripe_index = rot % map->num_stripes;
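		/*
		 * Illustrative example (made-up layout): on RAID5 over 3
		 * devices (2 data + 1 parity), a full stripe whose rotation
		 * works out to rot = 1 puts data stripe i = 0 on device 1
		 * and i = 1 on device 2, leaving device 0 with the parity.
		 */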
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_sector *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) {
		list_del_init(&curr->list);
		scrub_sector_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_status)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
			NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}

static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_io_context *bioc = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bioc);
	if (ret || !bioc || !bioc->raid_map)
		goto bioc_out;

	bio = btrfs_bio_alloc(BIO_MAX_VECS);
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, length,
					      sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bioc_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bioc(bioc);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, logic_start);
	struct btrfs_root *csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_path *path;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	/* Check the comment in scrub_stripe() for why u32 is enough here */
	u32 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;
	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ASSERT(map->stripe_len <= U32_MAX);
	nsectors = map->stripe_len >> fs_info->sectorsize_bits;
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_free_path(path);
		return -ENOMEM;
	}

	ASSERT(map->stripe_len <= U32_MAX);
	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->sectors_list);
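	/*
	 * Both bitmaps live in one allocation: dbitmap marks the sectors
	 * that carry data to be checked, ebitmap marks sectors that failed
	 * and must be rebuilt from parity.
	 */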
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			ASSERT(bytes <= U32_MAX);
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bioc = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					extent_logical, &mapped_length, &bioc,
					0);
			if (!ret) {
				if (!bioc || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bioc(bioc);
				goto out;
			}
			extent_physical = bioc->stripes[0].physical;
			extent_mirror_num = bioc->mirror_num;
			extent_dev = bioc->stripes[0].dev;
			btrfs_put_bioc(bioc);

			csum_root = btrfs_csum_root(fs_info, extent_logical);
			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0) {
		ASSERT(logic_end - logic_start <= U32_MAX);
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	}
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

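/*
 * On zoned filesystems the dev-replace target must not see writes get
 * reordered: flush everything that is queued and wait for all in-flight
 * bios before moving on.
 */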
static void sync_replace_for_zoned(struct scrub_ctx *sctx)
{
	if (!btrfs_is_zoned(sctx->fs_info))
		return;

	sctx->flush_all_writes = true;
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
}

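/*
 * After a stripe is finished, bring the target device's zone write
 * pointer in sync with where the copy actually ended, so the zone is
 * left in a consistent state for subsequent writes.
 */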
static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
					u64 physical, u64 physical_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int ret = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	mutex_lock(&sctx->wr_lock);
	if (sctx->write_pointer < physical_end) {
		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
						    physical,
						    sctx->write_pointer);
		if (ret)
			btrfs_err(fs_info,
				  "zoned: failed to recover write pointer");
	}
	mutex_unlock(&sctx->wr_lock);
	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);

	return ret;
}

static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct btrfs_block_group *bg,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int stripe_index, u64 dev_extent_len)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	const u64 chunk_logical = bg->start;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct btrfs_key key;
	u64 increment;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	/*
	 * Unlike chunk length, extent length should never go beyond
	 * BTRFS_MAX_EXTENT_SIZE, thus u32 is enough here.
	 */
	u32 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[stripe_index].physical;
	offset = 0;
	nstripes = div64_u64(dev_extent_len, map->stripe_len);
	mirror_num = 1;
	increment = map->stripe_len;
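	/*
	 * For striped profiles each device holds only every Nth stripe_len
	 * chunk of the chunk's logical address space, so the starting
	 * offset and the per-iteration increment depend on the RAID layout.
	 */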
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * stripe_index;
		increment = map->stripe_len * map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (stripe_index / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = stripe_index % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		mirror_num = stripe_index % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		mirror_num = stripe_index % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, stripe_index, map, &offset,
					NULL);
		increment = map->stripe_len * nr_data_stripes(map);
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->reada = READA_FORWARD;

	logical = chunk_logical + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, stripe_index,
					map, &logic_end, NULL);
		logic_end += chunk_logical;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	root = btrfs_extent_root(fs_info, logical);
	csum_root = btrfs_csum_root(fs_info, logical);

	/*
	 * Collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB.
	 */
	blk_start_plug(&plug);

	if (sctx->is_dev_replace &&
	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
		mutex_lock(&sctx->wr_lock);
		sctx->write_pointer = physical;
		mutex_unlock(&sctx->wr_lock);
		sctx->flush_all_writes = true;
	}

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}

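		/*
		 * On RAID5/6 a position that lands on a parity stripe is
		 * handed to scrub_raid56_parity() for the whole stripe and
		 * then skipped here.
		 */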
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, stripe_index,
						      map, &logical,
						      &stripe_logical);
			logical += chunk_logical;
			if (ret) {
				/* it is a parity stripe */
				stripe_logical += chunk_logical;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

3313 3314 3315 3316
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
A
Arne Jansen 已提交
3317
		key.objectid = logical;
L
Liu Bo 已提交
3318
		key.offset = (u64)-1;
A
Arne Jansen 已提交
3319 3320 3321 3322

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
3323

3324
		if (ret > 0) {
3325
			ret = btrfs_previous_extent_item(root, path, 0);
A
Arne Jansen 已提交
3326 3327
			if (ret < 0)
				goto out;
3328 3329 3330 3331 3332 3333 3334 3335 3336
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
A
Arne Jansen 已提交
3337 3338
		}

L
Liu Bo 已提交
3339
		stop_loop = 0;
A
Arne Jansen 已提交
3340
		while (1) {
3341 3342
			u64 bytes;

A
Arne Jansen 已提交
3343 3344 3345 3346 3347 3348 3349 3350 3351
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

L
Liu Bo 已提交
3352
				stop_loop = 1;
A
Arne Jansen 已提交
3353 3354 3355 3356
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

3357 3358 3359 3360
			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

3361
			if (key.type == BTRFS_METADATA_ITEM_KEY)
3362
				bytes = fs_info->nodesize;
3363 3364 3365 3366
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
A
Arne Jansen 已提交
3367 3368
				goto next;

L
Liu Bo 已提交
3369 3370 3371 3372 3373 3374
			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}
A
Arne Jansen 已提交
3375

3376 3377 3378 3379 3380 3381
			/*
			 * If our block group was removed in the meanwhile, just
			 * stop scrubbing since there is no point in continuing.
			 * Continuing would prevent reusing its device extents
			 * for new block groups for a long time.
			 */
3382 3383 3384
			spin_lock(&bg->lock);
			if (bg->removed) {
				spin_unlock(&bg->lock);
3385 3386 3387
				ret = 0;
				goto out;
			}
3388
			spin_unlock(&bg->lock);
3389

A
Arne Jansen 已提交
3390 3391 3392 3393 3394
			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

3395 3396 3397 3398
			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
3399
				btrfs_err(fs_info,
J
Jeff Mahoney 已提交
3400
					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3401
				       key.objectid, logical);
3402 3403 3404
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
A
Arne Jansen 已提交
3405 3406 3407
				goto next;
			}

L
Liu Bo 已提交
3408 3409
again:
			extent_logical = key.objectid;
3410
			ASSERT(bytes <= U32_MAX);
L
Liu Bo 已提交
3411 3412
			extent_len = bytes;

A
Arne Jansen 已提交
3413 3414 3415
			/*
			 * trim extent to this stripe
			 */
L
Liu Bo 已提交
3416 3417 3418
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
A
Arne Jansen 已提交
3419
			}
L
Liu Bo 已提交
3420
			if (extent_logical + extent_len >
A
Arne Jansen 已提交
3421
			    logical + map->stripe_len) {
L
Liu Bo 已提交
3422 3423
				extent_len = logical + map->stripe_len -
					     extent_logical;
A
Arne Jansen 已提交
3424 3425
			}

L
Liu Bo 已提交
3426
			extent_physical = extent_logical - logical + physical;
3427 3428
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
3429
			if (sctx->is_dev_replace)
3430 3431 3432 3433
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);
L
Liu Bo 已提交
3434

3435 3436 3437 3438 3439 3440 3441 3442
			if (flags & BTRFS_EXTENT_FLAG_DATA) {
				ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
				if (ret)
					goto out;
			}
L
Liu Bo 已提交
3443

L
Liu Bo 已提交
3444
			ret = scrub_extent(sctx, map, extent_logical, extent_len,
3445 3446
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
3447
					   extent_logical - logical + physical);
3448 3449 3450

			scrub_free_csums(sctx);

A
Arne Jansen 已提交
3451 3452 3453
			if (ret)
				goto out;

3454 3455 3456
			if (sctx->is_dev_replace)
				sync_replace_for_zoned(sctx);

L
Liu Bo 已提交
3457 3458
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
3459
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3460 3461 3462 3463
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
3464 3465 3466
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
3467 3468 3469
							stripe_index, map,
							&logical, &stripe_logical);
					logical += chunk_logical;
3470 3471

					if (ret && physical < physical_end) {
						stripe_logical += chunk_logical;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[stripe_index].physical +
						   dev_extent_len;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);

	if (sctx->is_dev_replace && ret >= 0) {
		int ret2;

		ret2 = sync_write_pointer_for_zoned(sctx,
				chunk_logical + offset,
				map->stripes[stripe_index].physical,
				physical_end);
		if (ret2)
			ret = ret2;
	}

	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_block_group *bg,
					  struct btrfs_device *scrub_dev,
					  u64 dev_offset,
					  u64 dev_extent_len)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, bg->start, bg->length);
	read_unlock(&map_tree->lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&bg->lock);
		if (!bg->removed)
			ret = -EINVAL;
		spin_unlock(&bg->lock);

		return ret;
	}
	if (em->start != bg->start)
		goto out;
	if (em->len < dev_extent_len)
		goto out;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, bg, map, scrub_dev, i,
					   dev_extent_len);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

static int finish_extent_writes_for_zoned(struct btrfs_root *root,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_wait_block_group_reservations(cache);
	btrfs_wait_nocow_writers(cache);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
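	/*
	 * Commit a transaction so that the writes waited for above are
	 * fully persisted before dev-replace starts copying this block
	 * group.
	 */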

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}

static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		u64 dev_extent_len;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + dev_extent_len <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		ASSERT(cache->start <= chunk_offset);
		/*
		 * We are using the commit root to search for device extents, so
		 * that means we could have found a device extent item from a
		 * block group that was deleted in the current transaction. The
		 * logical start offset of the deleted block group, stored at
		 * @chunk_offset, might be part of the logical address range of
		 * a new block group (which uses different physical extents).
		 * In this case btrfs_lookup_block_group() has returned the new
		 * block group, and its start address is less than @chunk_offset.
		 *
		 * We skip such new block groups, because it's pointless to
		 * process them, as we won't find their extents because we search
		 * for them using the commit root of the extent tree. For a device
		 * replace it's also fine to skip it, we won't miss copying them
		 * to the target device because we have the write duplication
		 * setup through the regular write path (by btrfs_map_block()),
		 * and we have committed a transaction when we started the device
		 * replace, right after setting up the device replace state.
		 */
		if (cache->start < chunk_offset) {
			btrfs_put_block_group(cache);
			goto skip;
		}

		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
			spin_lock(&cache->lock);
			if (!cache->to_copy) {
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto skip;
			}
			spin_unlock(&cache->lock);
		}

		/*
		 * Make sure that while we are scrubbing the corresponding block
		 * group doesn't get its logical address and its device extents
		 * reused for another block group, which can possibly be of a
		 * different type and different profile. We do this to prevent
		 * false error detections and crashes due to bogus attempts to
		 * repair extents.
		 */
		spin_lock(&cache->lock);
		if (cache->removed) {
			spin_unlock(&cache->lock);
			btrfs_put_block_group(cache);
			goto skip;
		}
		btrfs_freeze_block_group(cache);
		spin_unlock(&cache->lock);

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);

		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bg is small, that's pretty common.
		 * 2. A new SYSTEM bg will be allocated,
		 *    because the regular version would allocate a new chunk.
		 * 3. The new SYSTEM bg is empty and will get cleaned up.
		 *    Before the cleanup really happens, it's marked RO again.
		 * 4. The empty SYSTEM bg gets scrubbed.
		 *    We go back to 2.
		 *
		 * This can easily boost the number of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up
		 * all the space of btrfs_super_block::sys_chunk_array.
		 *
		 * For dev replace, on the other hand, we need to try our best
		 * to mark the block group RO, to prevent a race between:
		 * - Write duplication
		 *   Contains the latest data
		 * - Scrub copy
		 *   Contains data from the commit tree
		 *
		 * If the target block group is not marked RO, nocow writes
		 * can be overwritten by the scrub copy, causing data
		 * corruption. So for dev-replace, it's not allowed to
		 * continue if a block group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
		if (!ret && sctx->is_dev_replace) {
			ret = finish_extent_writes_for_zoned(root, cache);
			if (ret) {
				btrfs_dec_block_group_ro(cache);
				scrub_pause_off(fs_info);
				btrfs_put_block_group(cache);
				break;
			}
		}

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * fails to create a new chunk for metadata. That is
			 * not a problem for scrub, because metadata is always
			 * COWed, and our scrub pauses transaction commits.
			 */
			ro_set = 0;
		} else if (ret == -ETXTBSY) {
			btrfs_warn(fs_info,
		   "skipping scrub of block group %llu due to active swapfile",
				   cache->start);
			scrub_pause_off(fs_info);
			ret = 0;
			goto skip_unfreeze;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block is marked RO, wait for nocow writes to
		 * finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in commit tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
					cache->length);
		}

		scrub_pause_off(fs_info);
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + dev_extent_len;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
				  dev_extent_len);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * This must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while we are
		 * waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		if (sctx->is_dev_replace &&
		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
						      cache, found_key.offset))
			ro_set = 0;

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}
skip_unfreeze:
		btrfs_unfreeze_block_group(cache);
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + dev_extent_len;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
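		/*
		 * Superblock copies sit at fixed offsets (64KiB, 64MiB and
		 * 256GiB); copies that would fall beyond the end of the
		 * device are skipped below.
		 */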
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;
		if (!btrfs_check_super_location(scrub_dev, bytenr))
			continue;

		ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				    scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				    NULL, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
					&fs_info->scrub_lock)) {
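		/*
		 * refcount_dec_and_mutex_lock() acquired scrub_lock only
		 * because the refcount dropped to zero, so we are the last
		 * user: detach the queues under the lock and destroy them
		 * after releasing it.
		 */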
		struct btrfs_workqueue *scrub_workers = NULL;
		struct btrfs_workqueue *scrub_wr_comp = NULL;
		struct btrfs_workqueue *scrub_parity = NULL;

		scrub_workers = fs_info->scrub_workers;
		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
		scrub_parity = fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
		mutex_unlock(&fs_info->scrub_lock);

		btrfs_destroy_workqueue(scrub_workers);
		btrfs_destroy_workqueue(scrub_wr_comp);
		btrfs_destroy_workqueue(scrub_parity);
	}
}

/*
 * Get a reference count on fs_info->scrub_workers; start the workers if necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	struct btrfs_workqueue *scrub_workers = NULL;
	struct btrfs_workqueue *scrub_wr_comp = NULL;
	struct btrfs_workqueue *scrub_parity = NULL;
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;
	int ret = -ENOMEM;

	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
		return 0;

	scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
					      is_dev_replace ? 1 : max_active, 4);
	if (!scrub_workers)
		goto fail_scrub_workers;

	scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
	if (!scrub_wr_comp)
		goto fail_scrub_wr_completion_workers;

	scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					     max_active, 2);
	if (!scrub_parity)
		goto fail_scrub_parity_workers;

	mutex_lock(&fs_info->scrub_lock);
	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL &&
		       fs_info->scrub_wr_completion_workers == NULL &&
		       fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_workers = scrub_workers;
		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
		fs_info->scrub_parity_workers = scrub_parity;
		refcount_set(&fs_info->scrub_workers_refcnt, 1);
		mutex_unlock(&fs_info->scrub_lock);
		return 0;
	}
	/* Other thread raced in and created the workers for us */
	refcount_inc(&fs_info->scrub_workers_refcnt);
	mutex_unlock(&fs_info->scrub_lock);

	ret = 0;
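	/*
	 * We lost the race above: fall through the error labels to free the
	 * workqueues we allocated but did not install, while keeping ret == 0.
	 */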
	btrfs_destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
	btrfs_destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(scrub_workers);
fail_scrub_workers:
	return ret;
}

int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub is unable to calculate the checksum
		 * the way it is implemented. Do not handle this situation
		 * at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
		       fs_info->nodesize,
		       BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_SECTORS_PER_BLOCK) {
		/*
		 * Would exhaust the array bounds of the sectors member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
"scrub: nodesize and sectorsize <= SCRUB_MAX_SECTORS_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->nodesize, SCRUB_MAX_SECTORS_PER_BLOCK,
		       fs_info->sectorsize, SCRUB_MAX_SECTORS_PER_BLOCK);
		return -EINVAL;
	}
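	/*
	 * Illustrative numbers: with 4KiB sectors, SCRUB_MAX_SECTORS_PER_BLOCK
	 * (16) allows a nodesize of up to 16 * 4KiB = 64KiB, matching
	 * BTRFS_MAX_METADATA_BLOCKSIZE.
	 */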

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		goto out_free_ctx;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info,
			"scrub on devid %llu: filesystem on %s is not writable",
				 devid, rcu_str_deref(dev->name));
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out;
	}
	up_read(&fs_info->dev_replace.rwsem);

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race
	 * between a committing transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * By holding the device list mutex, we can kick off
		 * writing the super blocks in a log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
4183
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_workers_put(fs_info);
	scrub_put_ctx(sctx);

	return ret;
out:
	scrub_workers_put(fs_info);
out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}

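/*
 * Block new scrub work and wait until every running scrub has reached a
 * pause point; paired with btrfs_scrub_continue() below. Transaction
 * commit uses this pair to quiesce scrub (see the GFP_NOFS comment in
 * btrfs_scrub_dev() above).
 */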
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u32 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_io_context *bioc = NULL;
	int ret;

	mapped_length = extent_len;
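	/*
	 * Redirect the read to the first stripe of the mapping done below;
	 * if the mapping fails, the outputs are left untouched and the
	 * caller keeps using the original location.
	 */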
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bioc, 0);
	if (ret || !bioc || mapped_length < extent_len ||
	    !bioc->stripes[0].dev->bdev) {
		btrfs_put_bioc(bioc);
		return;
	}

	*extent_physical = bioc->stripes[0].physical;
	*extent_mirror_num = bioc->mirror_num;
	*extent_dev = bioc->stripes[0].dev;
	btrfs_put_bioc(bioc);
}