/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
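/*
 * For reference, assuming 4KiB pages: 32 pages * 4KiB = 128KiB per bio,
 * and 64 bios * 128KiB = 8MiB of outstanding read I/O per device, which
 * is where the numbers in the comments above come from.
 */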

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but where an error
	 * happened when reading or checking that data
	 */
	unsigned long		*ebitmap;
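	/*
	 * Presumably both bitmaps above point into the trailing bitmap[]
	 * storage below, which is over-allocated together with this
	 * struct when a scrub_parity is created.
	 */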

	unsigned long		bitmap[0];
};

struct scrub_wr_ctx {
	struct scrub_bio *wr_curr_bio;
	struct btrfs_device *tgtdev;
	int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t flush_all_writes;
	struct mutex wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t              refs;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

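/*
 * scrub_pause_on() and scrub_pause_off() must be used in pairs: "on"
 * announces that this scrub is pausing, "off" waits until any pending
 * pause request has been released before counting the scrub as running
 * again.
 */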
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	WARN_ON(!mutex_is_locked(&locks_root->lock));

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	WARN_ON(!mutex_is_locked(&locks_root->lock));

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to the chunk item size limit, the full stripe length should
	 * not be larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
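	/*
	 * Worked example with made-up numbers: for a chunk starting at
	 * key.objectid == 1GiB with full_stripe_len == 192KiB (three
	 * 64KiB data stripes), bytenr == 1GiB + 200KiB yields
	 * (200KiB / 192KiB) * 192KiB + 1GiB == 1GiB + 192KiB.
	 */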
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
		cache->full_stripe_len + cache->key.objectid;
	return ret;
}

/*
 * Lock a full stripe to avoid concurrent recovery and reads
 *
 * It's only used for profiles with parity (RAID5/6); for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * The caller must then call unlock_full_stripe() from the same context.
 *
 * Return <0 on error.
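 *
 * A minimal usage sketch (this mirrors how scrub_handle_errored_block()
 * below uses the pair):
 *
 *	bool locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck and repair the block ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);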
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: The caller must be in the same context as the one that called the
 * corresponding lock_full_stripe().
 *
 * Return 0 if the full stripe was unlocked without problem.
 * Return <0 on error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	refcount_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * prevent cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * the check of the @scrubs_running == @scrubs_paused condition
	 * inside wait_event() is not an atomic operation, which means we
	 * may inc/dec @scrubs_running/@scrubs_paused at any time. wake up
	 * @scrub_pause_wait as often as we can so that a transaction
	 * commit is blocked for as short a time as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = fs_info->nodesize;
	sctx->sectorsize = fs_info->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(&sctx->wr_ctx,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
				  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  (unsigned long long)swarn->sector,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  (unsigned long long)swarn->sector,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
				"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defective sector.
			 * the data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the time being, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again when
			 * there's no dirty page in memory anymore.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where the on-the-fly error correction will
	 * kick in (once the read is finished) and rewrite the failed
	 * sector if a good copy can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
					  scrub_fixup_readpage, fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&fs_info->dev_replace.num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(fs_info,
		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	bool full_stripe_locked;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it;
		 * it will get rewritten with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * For RAID5/6, races can happen between the scrub threads of
	 * different devices: on data corruption, the parity and the data
	 * thread will both try to recover the data.
	 * Such a race can lead to doubly counted csum errors, or even an
	 * unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (which is the reason
	 * this fixup code was called), this time page by page, in order
	 * to know which pages caused I/O errors and which ones are good
	 * (for all mirrors).
	 * The goal is to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum: this means that the data
		 * might not be COWed, i.e. it might be modified
		 * concurrently. The general strategy of working on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum now succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		/* try to find no-io-error page in mirrors */
		if (page_bad->io_error) {
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	if (ret < 0)
		return ret;
	return 0;
}

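/*
 * Number of copies that can be tried for a block: RAID5 data can also be
 * rebuilt from parity (2 tries), RAID6 additionally from the second
 * parity (3); for the other profiles, every stripe is a full copy.
 */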
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}

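/*
 * Translate a logical address to a stripe index and the offset into that
 * stripe: for RAID5/6 the index is found by scanning raid_map (skipping
 * the P/Q stripes); for all other profiles the mirror number is used
 * directly and the offset is 0.
 */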
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &mapped_length, &bbio);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->sblock = sblock;
			page->flags = flags;
			page->generation = generation;
			page->logical = logical;
			page->have_csum = have_csum;
			if (have_csum)
				memcpy(page->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

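/*
 * Completion context used below to turn the asynchronous raid56
 * recovery bio into a synchronous read.
 */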
struct scrub_bio_ret {
	struct completion event;
	int error;
};

static void scrub_bio_wait_endio(struct bio *bio)
{
	struct scrub_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	struct scrub_bio_ret done;
	int ret;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
				    page->recover->map_length,
				    page->mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion(&done.event);
	if (done.error)
		return -EIO;

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
			if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
				page->io_error = 1;
				sblock->no_io_error_seen = 0;
			}
		} else {
			bio->bi_iter.bi_sector = page->physical >> 9;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);

			if (btrfsic_submit_bio_wait(bio)) {
				page->io_error = 1;
				sblock->no_io_error_seen = 0;
			}
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

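/* check whether the given fsid matches the fs the page was read from */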
static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
	return !ret;
}

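/* recompute and verify the checksum of a freshly re-read block */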
static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

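/*
 * repair all pages of a bad block from the corresponding pages of a
 * good mirror
 */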
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

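/*
 * rewrite one page of the bad block with the matching page of the good
 * block; with force_write set, the page is written even if no error was
 * recorded for it
 */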
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&fs_info->dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

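/* copy all pages of a scrubbed block to the dev-replace target device */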
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&fs_info->dev_replace.num_write_errors);
	}
}

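/*
 * write one page to the dev-replace target; pages that had a read error
 * are zeroed first so no stale data ends up on the new device
 */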
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		clear_page(mapped_buffer);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

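/*
 * add a page to the current write bio for the dev-replace target; the
 * bio is submitted once it is full or the next page is not physically
 * and logically contiguous with it
 */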
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_KERNEL,
					wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

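/*
 * worker side of the write bio completion: account write errors, drop
 * the page references and free the scrub_bio
 */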
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

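/*
 * verify the checksum of a block based on its extent type; a non-zero
 * return value means the block is corrupted and error handling is
 * started for it
 */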
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently, because this
	 * function only uses the return value instead of these stats.
	 *
	 * Todo:
	 * always use stats
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

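/* verify the data checksum of a block, returns non-zero on mismatch */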
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->checksum_error;
}

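/*
 * verify a tree block: bytenr, generation, fsid and chunk tree uuid in
 * the header plus the checksum over the node data
 */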
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

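/*
 * verify a super block copy; errors are only counted here, the super
 * blocks get rewritten with the next transaction commit anyway
 */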
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

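/* submit the currently accumulated read bio, if there is one */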
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}

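/*
 * add a page to the current read bio; the bio is submitted first when
 * the page is not contiguous with it or when the bio is full
 */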
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_KERNEL,
					sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

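/*
 * completion and worker path for blocks that sit on a missing device
 * and had to be rebuilt via the RAID5/6 code before they can be checked
 */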
static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_error)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	scrub_block_put(sblock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			&length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
			scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

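/*
 * split an extent into pages, allocate a scrub_block for them and queue
 * all pages for reading
 */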
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (dev->missing) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

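/*
 * mark the sectors of [start, start + len) in the given per-stripe
 * bitmap, wrapping around at the stripe length
 */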
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u64 offset;
	int nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = div_u64(offset, sectorsize);
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * if the block has a checksum error, it is written via the
		 * repair mechanism in the dev replace case, otherwise it is
		 * written here directly
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

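/*
 * look up the data checksum for a logical address in the csum list,
 * returns 1 and copies the csum if one was found, 0 otherwise
 */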
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

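/*
 * like scrub_pages(), but the pages are additionally linked into the
 * scrub_parity so the parity can be checked after all data sectors of
 * the stripe have been read
 */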
static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (dev->missing) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

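/*
 * release a scrub_parity: sectors still marked as bad are counted as
 * read/uncorrectable errors and all page references are dropped
 */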
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}

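/*
 * start the parity check for all sectors that carried valid data;
 * sectors with read or checksum errors are excluded via the error
 * bitmap
 */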
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

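/*
 * walk all extents in [logic_start, logic_end) of a RAID5/6 stripe,
 * read and verify the data sectors and then let the raid56 code verify
 * the parity of the full stripe
 */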
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					extent_logical, &mapped_length, &bbio,
					0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}

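/*
 * scrub one stripe of the chunk on the given device: walk the extent
 * tree for the covered range, collect the csums and submit reads for
 * all extents found
 */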
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div64_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for extent tree and csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);


	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
	blk_start_plug(&plug);
	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is parity strip */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

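			/*
			 * An extent can span multiple stripes; after the
			 * piece inside the current stripe has been scrubbed,
			 * the loop below advances to the next stripe and
			 * jumps back here for the remainder.
			 */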
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}

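/*
 * A device extent covers exactly one stripe of a chunk. Find the stripe
 * of this chunk that lives at @dev_offset on the device being scrubbed
 * and scrub it.
 */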
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group_cache *cache,
					  int is_dev_replace)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

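/*
 * Walk all device extents of the scrubbed device inside [start, end),
 * look up the block group backing each chunk and scrub the corresponding
 * stripe. Each block group is made read-only (where possible) for the
 * duration so regular writes cannot race with the scrub.
 */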
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		/*
		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(fs_info, cache);
		if (!ret && is_dev_replace) {
			/*
			 * If we are doing a device replace, wait for any tasks
			 * that started delalloc right before we set the block
			 * group to RO mode, as they might have just allocated
			 * an extent from it or decided they could do a nocow
			 * write. And if any such tasks did that, wait for their
			 * ordered extents to complete and then commit the
			 * current transaction, so that we can later see the new
			 * extent items in the extent tree - the ordered extents
			 * create delayed data references (for cow writes) when
			 * they complete, which will be run and insert the
			 * corresponding extent items into the extent tree when
			 * we commit the transaction they used when running
			 * inode.c:btrfs_finish_ordered_io(). We later use
			 * the commit root of the extent tree to find extents
			 * to copy from the srcdev into the tgtdev, and we don't
			 * want to miss any new extents.
			 */
			btrfs_wait_block_group_reservations(cache);
			btrfs_wait_nocow_writers(cache);
			ret = btrfs_wait_ordered_roots(fs_info, -1,
						       cache->key.objectid,
						       cache->key.offset);
			if (ret > 0) {
				struct btrfs_trans_handle *trans;

				trans = btrfs_join_transaction(root);
				if (IS_ERR(trans))
					ret = PTR_ERR(trans);
				else
					ret = btrfs_commit_transaction(trans);
				if (ret) {
					scrub_pause_off(fs_info);
					btrfs_put_block_group(cache);
					break;
				}
			}
		}
		scrub_pause_off(fs_info);

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC) {
			/*
			 * btrfs_inc_block_group_ro returns -ENOSPC when it
			 * fails to create a new chunk for metadata.
			 * It is not a problem for scrub/replace, because
			 * metadata are always cowed, and our scrub paused
			 * commit_transactions.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro, ret=%d\n",
				   ret);
			btrfs_put_block_group(cache);
			break;
		}

		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache, is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);

		scrub_pause_off(fs_info);

		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    btrfs_block_group_used(&cache->item) == 0) {
			spin_unlock(&cache->lock);
			spin_lock(&fs_info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &fs_info->unused_bgs);
			}
			spin_unlock(&fs_info->unused_bgs_lock);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

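/*
 * Scrub all superblock copies that fit on the device. The expected
 * generation is the last committed transaction, or the device's own
 * generation for a seed device attached to a newer filesystem.
 */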
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers)
			goto fail_scrub_nocow_workers;
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
fail_scrub_nocow_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

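/*
 * Entry point for scrub and device replace: validates the nodesize and
 * sectorsize assumptions scrub relies on, looks up the device, sets up
 * the scrub context and worker queues, then scrubs the superblocks (for
 * plain scrub) and every chunk allocated on the device in [start, end).
 */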
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize,
			  BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly && !dev->writeable) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * checking @scrub_pause_req here, we can avoid
	 * race between committing transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

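/*
 * Ask every running scrub to pause and wait until all of them have
 * reached a safe blocking point, i.e. scrubs_paused has caught up with
 * scrubs_running.
 */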
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

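/*
 * Remap a logical extent to the physical address and device of the first
 * mirror returned by btrfs_map_block(); used on the dev-replace path when
 * reading extents to copy.
 */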
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}

static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}

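/*
 * Nodatacow extents carry no checksums, so they cannot be verified by the
 * regular scrub path. For dev-replace the data is instead read through the
 * owning inode's page cache and written directly to the target device;
 * copy_nocow_pages() defers that work to the scrub_nocow_workers queue.
 */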
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1

static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int not_written = 0;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info,
			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
			   logical, physical_for_dev_replace, len, mirror_num,
			   ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &inode->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}

static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	inode_lock(inode);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(BTRFS_I(inode), offset, len,
			nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_SIZE) {
		index = offset >> PAGE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless, because it may be
			 * an old one, the new data may be written into the
			 * new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(BTRFS_I(inode), offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		put_page(page);

		if (ret)
			break;

		offset += PAGE_SIZE;
		physical_for_dev_replace += PAGE_SIZE;
		nocow_ctx_logical += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	inode_unlock(inode);
	iput(inode);
	return ret;
}

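/*
 * Synchronously write a single page to the dev-replace target device at
 * the given physical offset, bypassing the regular scrub bio batching.
 */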
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		btrfs_warn_rl(dev->fs_info,
			"scrub write_page_nocow(bdev == NULL) is unexpected");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}