// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

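/*
 * Tracks one logical->physical mapping used while re-reading a block during
 * recovery; the btrfs_bio and mapped length are shared, via the refcount,
 * by all pages of the block being rechecked.
 */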
struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

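/* State of one page (dev, csum, flags, I/O status) within a scrub_block. */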
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

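/*
 * One bio in flight for scrub, carrying up to SCRUB_PAGES_PER_RD_BIO (or
 * SCRUB_PAGES_PER_WR_BIO) physically and logically contiguous pages.
 */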
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

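/*
 * One checksummed unit (a data sector or a tree block), built from up to
 * SCRUB_MAX_PAGES_PER_BLOCK pages.
 */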
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but where errors happened
	 * when reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

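/* Per-device scrub state; one context exists per running scrub or replace. */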
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio        *wr_curr_bio;
	struct mutex            wr_lock;
	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device     *wr_tgtdev;
	bool                    flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t              refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

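/*
 * One node in a block group's full_stripe_locks_root tree, keyed by the
 * logical start of a RAID5/6 full stripe. The embedded mutex serializes
 * recovery and regular scrub reads of that full stripe.
 */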
struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

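/*
 * The in-flight bio count is paired with a reference on the scrub context,
 * so the context cannot be freed while end_io workers are still running.
 */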
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

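/*
 * scrub_pause_on() and scrub_pause_off() bracket a region in which this
 * scrub counts as paused; the _off side also blocks until any pending
 * pause request has been withdrawn.
 */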
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
		cache->full_stripe_len + cache->key.objectid;
	return ret;
}

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with its mutex
 * held; the caller must then call unlock_full_stripe() from the same context.
 *
 * Return <0 on error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlocked the full stripe without problems.
 * Return <0 on error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
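
/*
 * Typical usage of the locking pair above, as in scrub_handle_errored_block()
 * below:
 *
 *	bool locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck and repair the blocks covered by the full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 */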

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

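/*
 * Allocate and initialize a scrub context for @dev. All SCRUB_BIOS_PER_SCTX
 * read bios are pre-allocated here; the context is freed through
 * scrub_put_ctx() once its refcount drops to zero.
 */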
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->fs_info;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

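/*
 * Backref walk callback: called once for each inode that references the
 * errored extent; resolves the file path(s) and prints one warning per path.
 */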
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_ipath might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->pagev[0]->physical;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	bool full_stripe_locked;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * For RAID5/6, a race can happen with the scrub thread of a different
	 * device. On data corruption, the parity and data threads will both
	 * try to recover the data.
	 * The race can lead to doubly counted csum errors, or even an
	 * unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (the reason this fixup
	 * code was called), this time page by page, in order to know which
	 * pages caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ;mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].page_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
			int max_allowed = r->bbio->num_stripes -
						r->bbio->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].page_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistics counting and for the
	 * final scrub report on whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512-byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			/*
			 * In case of dev replace, if the raid56 rebuild process
			 * didn't produce correct data, copy the content of
			 * sblock_bad to make sure the target device is identical
			 * to the source device, instead of writing garbage from
			 * the sblock_for_recheck array to the target device.
			 */
			sblock_other = NULL;
		} else if (page_bad->io_error) {
			/* try to find no-io-error page in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	if (ret < 0)
		return ret;
	return 0;
}

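/*
 * Number of "mirrors" worth rechecking: RAID5 has the original plus one
 * reconstruction from parity, RAID6 has two reconstructions, and all other
 * profiles have one copy per stripe.
 */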
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}

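/*
 * Map a logical address to a stripe index and offset within the mapped
 * extent: for RAID5/6 the raid_map is searched, otherwise the mirror
 * number directly selects the stripe.
 */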
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}

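/*
 * Build one scrub_block per mirror for the recheck: each PAGE_SIZE chunk of
 * the original block is mapped with GET_READ_MIRRORS and gets a freshly
 * allocated page per mirror, so every copy can be re-read independently.
 */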
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &mapped_length, &bbio);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->sblock = sblock;
			page->flags = flags;
			page->generation = generation;
			page->logical = logical;
			page->have_csum = have_csum;
			if (have_csum)
				memcpy(page->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

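/*
 * Synchronously read one block through the RAID5/6 recovery path,
 * reconstructing the data from the remaining stripes if necessary.
 */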
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	int mirror_num;

	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	mirror_num = page->sblock->pagev[0]->mirror_num;
	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
				    page->recover->map_length,
				    mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}

static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_page *first_page = sblock->pagev[0];
	struct bio *bio;
	int page_num;

	/* All pages in sblock belong to the same stripe on the same device. */
	ASSERT(first_page->dev);
	if (!first_page->dev->bdev)
		goto out;

	bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
	bio_set_dev(bio, first_page->dev->bdev);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct scrub_page *page = sblock->pagev[page_num];

		WARN_ON(!page->page);
		bio_add_page(bio, page->page, PAGE_SIZE, 0);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (page_num = 0; page_num < sblock->page_count; page_num++)
		sblock->pagev[page_num]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, page->dev->bdev);

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		bio->bi_iter.bi_sector = page->physical >> 9;
		bio->bi_opf = REQ_OP_READ;

		if (btrfsic_submit_bio_wait(bio)) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

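/* Returns 1 if @fsid matches the fsid of the device the page belongs to. */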
static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, page_bad->dev->bdev);
		bio->bi_iter.bi_sector = page_bad->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		clear_page(mapped_buffer);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

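/*
 * Queue @spage on the current dev-replace write bio; the bio is submitted
 * first whenever it is full or the page is not physically and logically
 * contiguous with it.
 */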
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->page_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = sctx->wr_tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_disk);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
1702
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1703

1704
	sbio->status = bio->bi_status;
1705 1706
	sbio->bio = bio;

1707 1708
	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			 scrub_wr_bio_end_io_worker, NULL, NULL);
1709
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1710 1711 1712 1713 1714 1715 1716 1717 1718
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->status) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			atomic64_inc(&dev_replace->num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently, because callers
	 * of this function only use the return value, not these stats.
	 *
	 * Todo: always use stats
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->fs_info->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->checksum_error;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}
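
/*
 * Worked example of the page walk above, assuming a 16 KiB nodesize
 * and 4 KiB pages: len starts at 16384 - 32 (the on-disk csum bytes
 * are skipped), the first iteration hashes 4096 - 32 bytes from
 * page 0, and the remaining 3 * 4096 bytes come from pages 1-3, so
 * one tree block spans 4 scrub_pages. Other nodesize/page-size
 * combinations walk the same way.
 */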

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * If we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}

static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_READ;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_status)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	scrub_block_put(sblock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			&length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
			scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
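
/*
 * Reference-count sketch for the function above (an illustration, not
 * upstream documentation): for a 64 KiB extent with 4 KiB pages,
 * 16 scrub_pages hang off one scrub_block. The block starts with one
 * local ref, scrub_add_page_to_rd_bio() takes one more per page put
 * in flight, and the final scrub_block_put() here drops the local
 * ref, so whichever bio completion finishes last frees the block.
 */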

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u64 offset;
	u64 nsectors64;
	u32 nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = div_u64(offset, sectorsize);
	nsectors64 = div_u64(len, sectorsize);

	ASSERT(nsectors64 < UINT_MAX);
	nsectors = (u32)nsectors64;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}
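
/*
 * Worked example for the wrap-around above, assuming a 64 KiB
 * stripe_len and 4 KiB sectors (nsectors == 16): marking a range that
 * starts at byte offset 60 KiB within the stripe with len == 8 KiB
 * yields offset == 15 and nsectors == 2, so bit 15 is set and the
 * second sector wraps around to bit 0.
 */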

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev replace case: if the block has a checksum
		 * error, it is written via the repair mechanism;
		 * otherwise it is written here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
	ASSERT(index < UINT_MAX);

	num_sectors = sum->len / sctx->fs_info->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
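
/*
 * Lookup sketch for the function above, assuming crc32 (4-byte csums)
 * and a 4 KiB sectorsize: an ordered sum with bytenr == 64 KiB and
 * len == 16 KiB carries 4 csums. For logical == 72 KiB the index
 * computes to 2, so the third csum is copied out; the entry is only
 * freed once its last sector (index == 3) has been consumed.
 */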

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
			u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
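
/*
 * Split example for the loop above (illustrative numbers only): with
 * a 4 KiB sectorsize, a 20 KiB data extent on a non-RAID56 chunk is
 * scrubbed as five 4 KiB blocks, each with its own csum lookup; on
 * RAID5/6 the same extent would instead be cut at stripe_len
 * (typically 64 KiB) boundaries.
 */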

static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the leftmost data
 * stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data is on */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
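
/*
 * Worked example, assuming RAID5 with 3 devices (2 data stripes) and
 * a 64 KiB stripe_len: for the first physical stripe of device 0,
 * last_offset is 0 and i == 0 already maps to stripe_index 0 == num,
 * so the stripe is data at relative logical offset 0. For device 2,
 * i == 0 and i == 1 map to stripe indexes 0 and 1, both below num,
 * so the stripe is parity and *offset lands past the two data
 * stripes, at 128 KiB.
 */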

static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_status)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}

static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}
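
/*
 * Size example (illustrative): with a 64 KiB stripe_len and 4 KiB
 * sectors, nsectors == 16, so DIV_ROUND_UP(16, 64) == 1 long
 * (8 bytes on a 64-bit machine); the caller allocates 2 * bitmap_len
 * bytes to hold both the data and the error bitmap.
 */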

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					extent_logical, &mapped_length, &bbio,
					0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div64_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}
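
	/*
	 * Geometry example for the chain above (illustrative, assuming a
	 * 64 KiB stripe_len): for RAID10 with num_stripes == 4 and
	 * sub_stripes == 2, device index num == 3 gives factor == 2,
	 * offset == stripe_len * 1, increment == stripe_len * 2 and
	 * mirror_num == 2, i.e. this device holds the second copy of
	 * every other stripe.
	 */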

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * Trigger the readahead for the extent tree and csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused to not hold off transaction commits.
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);


	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is a parity stripe */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
				       key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (sctx->is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);
			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, map, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}
	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(cache);
		if (!ret && sctx->is_dev_replace) {
			/*
			 * If we are doing a device replace, wait for any tasks
			 * that started delalloc right before we set the block
			 * group to RO mode, as they might have just allocated
			 * an extent from it or decided they could do a nocow
			 * write. And if any such tasks did that, wait for their
			 * ordered extents to complete and then commit the
			 * current transaction, so that we can later see the new
			 * extent items in the extent tree - the ordered extents
			 * create delayed data references (for cow writes) when
			 * they complete, which will be run and insert the
			 * corresponding extent items into the extent tree when
			 * we commit the transaction they used when running
			 * inode.c:btrfs_finish_ordered_io(). We later use
			 * the commit root of the extent tree to find extents
			 * to copy from the srcdev into the tgtdev, and we don't
			 * want to miss any new extents.
			 */
			btrfs_wait_block_group_reservations(cache);
			btrfs_wait_nocow_writers(cache);
			ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
						       cache->key.objectid,
						       cache->key.offset);
			if (ret > 0) {
				struct btrfs_trans_handle *trans;

				trans = btrfs_join_transaction(root);
				if (IS_ERR(trans))
					ret = PTR_ERR(trans);
				else
					ret = btrfs_commit_transaction(trans);
				if (ret) {
					scrub_pause_off(fs_info);
					btrfs_put_block_group(cache);
					break;
				}
			}
		}
		scrub_pause_off(fs_info);

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * failed to create a new chunk for metadata. This is
			 * not a problem for scrub/replace, because metadata
			 * is always cowed, and our scrub paused transaction
			 * commits.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_put_block_group(cache);
			break;
		}

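		/*
		 * Record the range of this dev extent in the dev-replace
		 * cursor and mark the replace item for writeback before
		 * scrubbing the chunk.
		 */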
		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache);

		/*
		 * Flush and submit all pending read and write bios, then
		 * wait for them to complete.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * Must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);
		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    btrfs_block_group_used(&cache->item) == 0) {
			spin_unlock(&cache->lock);
			btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

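/*
 * Scrub all super block copies that fit within the committed size of
 * @scrub_dev, using the generation of the last committed transaction (or the
 * device's own generation for seed devices) to verify them.
 */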
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start workers if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
				flags, is_dev_replace ? 1 : max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

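/*
 * drop a reference on fs_info->scrub_workers. destroy the workqueues once the
 * last reference is dropped
 */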
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

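/*
 * Entry point for scrub and device replace: checks the size assumptions,
 * looks up the device, sets up a scrub context and the worker queues, scrubs
 * the super blocks and then all chunks in the range [start, end).
 */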
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
		       fs_info->nodesize,
		       BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
		       fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
				rcu_str_deref(dev->name));
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race between
	 * committing a transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding the device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
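	/*
	 * Now that the pause request is visible, wait until every running
	 * scrub has actually reached the paused state.
	 */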
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
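	/*
	 * Wait until all running scrubs have noticed the cancel request and
	 * finished.
	 */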
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

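/*
 * Cancel the scrub running on a single device and wait until its scrub
 * context has been torn down.
 */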
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

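/*
 * Copy the current scrub statistics of @devid into @progress. Returns
 * -ENODEV if the device is not found and -ENOTCONN if it is not being
 * scrubbed.
 */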
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

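/*
 * Resolve the physical offset, device and mirror number of the first stripe
 * backing @extent_logical, leaving the outputs untouched if the mapping
 * fails or the stripe has no backing bdev.
 */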
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}