// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extent and super block and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
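
/*
 * For example, assuming 4KiB pages (which the size comments above assume):
 * one bio carries 32 * 4KiB = 128KiB, and with 64 scrub_bios per scrub
 * context up to 64 * 128KiB = 8MiB of scrub I/O is outstanding per device.
 */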

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};
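
/*
 * A scrub_recover is shared by all pages of one recheck block: each
 * scrub_page takes a reference via scrub_get_recover(), and the final
 * scrub_put_recover() drops the bbio and the fs_info bio counter (see
 * scrub_setup_recheck_block() and scrub_put_recover() below).
 */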

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[];
};
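
/*
 * dbitmap and ebitmap are assumed to point into the trailing bitmap[]
 * storage above, set up by the scrub_parity allocation code (not shown
 * here), so all three share one allocation.
 */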

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio        *wr_curr_bio;
	struct mutex            wr_lock;
	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device     *wr_tgtdev;
	bool                    flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t              refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
{
	return spage->recover &&
	       (spage->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
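
/*
 * Pause protocol: scrub_pause_on() marks this scrub as paused and wakes any
 * waiter on scrub_pause_wait; scrub_pause_off() blocks in
 * __scrub_blocked_if_needed() until scrub_pause_req drops to zero and only
 * then decrements scrubs_paused.
 */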

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/*
	 * Insert new lock.
	 */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}
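
/*
 * Worked example: with cache->start == 1GiB and full_stripe_len == 128KiB,
 * bytenr == cache->start + 300KiB yields cache->start + 256KiB, the logical
 * start of the full stripe that contains bytenr.
 */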

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
 * So the caller must call unlock_full_stripe() in the same context.
 *
 * Return <0 if an error is encountered.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock full stripe without problem.
 * Return <0 for error
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
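
/*
 * Minimal usage sketch, mirroring how scrub_handle_errored_block() pairs
 * the two calls:
 *
 *	bool locked;
 *	int ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck/repair the block covering @logical ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 */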

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
				NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->pagev[0]->physical;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	bool full_stripe_locked;
	unsigned int nofs_flag;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait for
	 * all the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_page_to_wr_bio(), which happens down the call chain of
	 * this function.
	 */
	nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, race can happen for a different device scrub thread.
	 * For data corruption, Parity and Data threads will both try
	 * to recover the data.
	 * Race can lead to doubly added csum error, or even unrecoverable
	 * error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		memalloc_nofs_restore(nofs_flag);
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * read all mirrors one after the other. This includes
	 * re-reading the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ;mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].page_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
			int max_allowed = r->bbio->num_stripes -
						r->bbio->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].page_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!spage_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (spage_bad->io_error) {
			/* try to find no-io-error page in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				spage_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		return ret;
	return 0;
}

static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
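
/*
 * For RAID5/6, raid_map[] (as returned by btrfs_map_sblock()) holds the
 * logical start of each stripe, or RAID5_P_STRIPE/RAID6_Q_STRIPE for the
 * parity stripes. E.g. a logical address inside
 * [raid_map[2], raid_map[2] + mapped_length) gives *stripe_index == 2 and
 * *stripe_offset == logical - raid_map[2].
 */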

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &mapped_length, &bbio);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *spage;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			spage = kzalloc(sizeof(*spage), GFP_NOFS);
			if (!spage) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_page_get(spage);
			sblock->pagev[page_index] = spage;
			spage->sblock = sblock;
			spage->flags = flags;
			spage->generation = generation;
			spage->logical = logical;
			spage->have_csum = have_csum;
			if (have_csum)
				memcpy(spage->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->fs_info->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			spage->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			spage->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			spage->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			spage->mirror_num = mirror_index + 1;
			sblock->page_count++;
			spage->page = alloc_page(GFP_NOFS);
			if (!spage->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			spage->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *spage)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	int mirror_num;

	bio->bi_iter.bi_sector = spage->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	mirror_num = spage->sblock->pagev[0]->mirror_num;
	ret = raid56_parity_recover(fs_info, bio, spage->recover->bbio,
				    spage->recover->map_length,
				    mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}

static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_page *first_page = sblock->pagev[0];
	struct bio *bio;
	int page_num;

	/* All pages in sblock belong to the same stripe on the same device. */
	ASSERT(first_page->dev);
	if (!first_page->dev->bdev)
		goto out;

	bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
	bio_set_dev(bio, first_page->dev->bdev);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct scrub_page *spage = sblock->pagev[page_num];

		WARN_ON(!spage->page);
		bio_add_page(bio, spage->page, PAGE_SIZE, 0);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (page_num = 0; page_num < sblock->page_count; page_num++)
		sblock->pagev[page_num]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *spage = sblock->pagev[page_num];

		if (spage->dev->bdev == NULL) {
			spage->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!spage->page);
		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, spage->dev->bdev);

		bio_add_page(bio, spage->page, PAGE_SIZE, 0);
		bio->bi_iter.bi_sector = spage->physical >> 9;
		bio->bi_opf = REQ_OP_READ;

		if (btrfsic_submit_bio_wait(bio)) {
			spage->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
	struct scrub_page *spage_good = sblock_good->pagev[page_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;

	BUG_ON(spage_bad->page == NULL);
	BUG_ON(spage_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || spage_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!spage_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, spage_bad->dev->bdev);
		bio->bi_iter.bi_sector = spage_bad->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;

		ret = bio_add_page(bio, spage_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(spage_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error)
		clear_page(page_address(spage->page));

	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->page_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = sctx->wr_tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}
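
/*
 * Pages are batched into wr_curr_bio as long as they are physically and
 * logically contiguous; when the bio reaches pages_per_wr_bio pages, or a
 * discontiguous page arrives (the "goto again" path above), the pending
 * bio is flushed via scrub_wr_submit().
 */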

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_disk);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

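/*
 * Write-bio completion: on error, mark every page in the bio and bump the
 * dev-replace write error counter, then drop all page and bio references.
 */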
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->status) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			atomic64_inc(&dev_replace->num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

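/*
 * Verify the block based on the extent type recorded in the first page's
 * flags; a non-zero return sends the block to scrub_handle_errored_block().
 */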
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently,
	 * because this function only uses the return value
	 * instead of these stat values.
	 *
	 * Todo:
	 * always use stats
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 csum[BTRFS_CSUM_SIZE];
	struct scrub_page *spage;
	char *kaddr;

	BUG_ON(sblock->page_count < 1);
	spage = sblock->pagev[0];
	if (!spage->have_csum)
		return 0;

	kaddr = page_address(spage->page);

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);

	if (memcmp(csum, spage->csum, sctx->fs_info->csum_size))
		sblock->checksum_error = 1;

	return sblock->checksum_error;
}

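/*
 * Verify a metadata block: check bytenr, generation, fsid and chunk tree
 * uuid in the header, then the checksum over all pages of the block.
 */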
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	const int num_pages = sctx->fs_info->nodesize >> PAGE_SHIFT;
	int i;
	struct scrub_page *spage;
	char *kaddr;

	BUG_ON(sblock->page_count < 1);
	spage = sblock->pagev[0];
	kaddr = page_address(spage->page);
	h = (struct btrfs_header *)kaddr;
	memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (spage->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (spage->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, spage))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    PAGE_SIZE - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_pages; i++) {
		kaddr = page_address(sblock->pagev[i]->page);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

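/*
 * Verify a super block copy.  Errors are only counted and reported here;
 * super blocks are rewritten by the next transaction commit anyway.
 */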
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct scrub_page *spage;
	char *kaddr;
	int fail_gen = 0;
	int fail_cor = 0;

	BUG_ON(sblock->page_count < 1);
	spage = sblock->pagev[0];
	kaddr = page_address(spage->page);
	s = (struct btrfs_super_block *)kaddr;

	if (spage->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (spage->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, spage))
		++fail_cor;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
			BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);

	if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(spage->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(spage->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

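/* Submit the read bio currently being filled, if there is one. */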
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}

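/*
 * Add one page to the current read bio, starting a fresh bio whenever the
 * page is not contiguous with the previous one or belongs to another device.
 */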
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_READ;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

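/* Completion of the rebuild bio issued by scrub_missing_raid56_pages(). */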
static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_status)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_block_put(sblock);
	scrub_pending_bio_dec(sctx);
}

static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			&length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

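/*
 * Split the range [logical, logical + len) into pages, package them as one
 * scrub_block and queue every page for reading; if the device is missing,
 * rebuild the block from the remaining RAID 5/6 stripes instead.
 */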
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->fs_info->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (flags & BTRFS_EXTENT_FLAG_SUPER)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

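/* Read-bio completion: record the status and defer to the scrub workers. */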
static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

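/*
 * Mark the sectors of [start, start + len) in a per-stripe bitmap; the
 * range may wrap around the end of the stripe.
 */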
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u64 offset;
	u64 nsectors64;
	u32 nsectors;
	u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = offset >> sectorsize_bits;
	nsectors64 = len >> sectorsize_bits;

	ASSERT(nsectors64 < UINT_MAX);
	nsectors = (u32)nsectors64;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

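/* Called once all pages of a block have finished their read I/O. */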
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If the block has a checksum error it is written back via
		 * the repair mechanism in the dev-replace case; otherwise it
		 * is written to the replace target here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
{
	sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
	list_del(&sum->list);
	kfree(sum);
}

/*
 * Find the desired csum for range [logical, logical + sectorsize), and store
 * the csum into @csum.
 *
 * The search source is sctx->csum_list, which is a pre-populated list
 * storing bytenr ordered csum ranges.  We're responsible for cleaning up any
 * range that is before @logical.
 *
 * Return 0 if there is no csum for the range.
 * Return 1 if there is csum for the range and copied to @csum.
 */
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	bool found = false;

	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum = NULL;
		unsigned long index;
		unsigned long num_sectors;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		/* The current csum range is beyond our range, no csum found */
		if (sum->bytenr > logical)
			break;

		/*
		 * The current sum is before our bytenr, since scrub is always
		 * done in bytenr order, the csum will never be used anymore,
		 * clean it up so that later calls won't bother with the range,
		 * and continue search the next range.
		 */
		if (sum->bytenr + sum->len <= logical) {
			drop_csum_range(sctx, sum);
			continue;
		}

		/* Now the csum range covers our bytenr, copy the csum */
		found = true;
		index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
		num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;

		memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
		       sctx->fs_info->csum_size);

		/* Cleanup the range if we're at the end of the csum range */
		if (index == num_sectors - 1)
			drop_csum_range(sctx, sum);
		break;
	}
	if (!found)
		return 0;
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
			u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL,
				  physical_for_dev_replace);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

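/*
 * Like scrub_pages(), but each page is additionally tracked on the
 * scrub_parity list so the parity can be verified once the stripe is done.
 */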
static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->fs_info->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its
 * logical offset.  If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, data_stripes);

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

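/* Parity-scrub bio completion: record errors and defer cleanup to a worker. */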
static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_status)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
			NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}

static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

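/*
 * Scrub the parity of one RAID 5/6 stripe: mark which sectors hold data or
 * saw errors while scrubbing the extents, then have the raid56 layer verify
 * and rewrite the parity once the last reference is dropped.
 */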
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = map->stripe_len >> fs_info->sectorsize_bits;
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					extent_logical, &mapped_length, &bbio,
					0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}

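/*
 * Scrub one device stripe of a chunk: walk the extent tree over the stripe
 * range, collect csums and scrub each extent; RAID 5/6 parity stripes are
 * handed off to scrub_raid56_parity().
 */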
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   struct btrfs_block_group *cache)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div64_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for extent tree and csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.type = BTRFS_EXTENT_CSUM_KEY;
		key.offset = logical;
		key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key_end.type = BTRFS_EXTENT_CSUM_KEY;
		key_end.offset = logic_end;
		reada2 = btrfs_reada_add(csum_root, &key, &key_end);
	} else {
		reada2 = NULL;
	}

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR_OR_NULL(reada2))
		btrfs_reada_wait(reada2);


	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is a parity stripe */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			/*
			 * If our block group was removed in the meanwhile, just
			 * stop scrubbing since there is no point in continuing.
			 * Continuing would prevent reusing its device extents
			 * for new block groups for a long time.
			 */
			spin_lock(&cache->lock);
			if (cache->removed) {
				spin_unlock(&cache->lock);
				ret = 0;
				goto out;
			}
			spin_unlock(&cache->lock);

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
				       key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (sctx->is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			if (flags & BTRFS_EXTENT_FLAG_DATA) {
				ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
				if (ret)
					goto out;
			}

			ret = scrub_extent(sctx, map, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}

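/* Find the mapping of a chunk and scrub each stripe that lives on @scrub_dev. */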
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, chunk_offset, 1);
	read_unlock(&map_tree->lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length, cache);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

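/*
 * Walk all device extents of @scrub_dev in [start, end) and scrub the
 * corresponding block groups, marking each group read-only (and frozen)
 * for the duration of the scrub.
 */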
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		/*
		 * Make sure that while we are scrubbing the corresponding block
		 * group doesn't get its logical address and its device extents
		 * reused for another block group, which can possibly be of a
		 * different type and different profile. We do this to prevent
		 * false error detections and crashes due to bogus attempts to
		 * repair extents.
		 */
		spin_lock(&cache->lock);
		if (cache->removed) {
			spin_unlock(&cache->lock);
			btrfs_put_block_group(cache);
			goto skip;
		}
		btrfs_freeze_block_group(cache);
		spin_unlock(&cache->lock);

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);

		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since the SYSTEM bg is small, that's pretty common.
		 * 2. A new SYSTEM bg will be allocated, because the regular
		 *    (non-scrub) RO path allocates a new chunk.
		 * 3. The new SYSTEM bg is empty and will get cleaned up.
		 *    Before cleanup really happens, it's marked RO again.
		 * 4. The empty SYSTEM bg gets scrubbed.
		 *    We go back to 2.
		 *
		 * This can easily boost the number of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up
		 * all the space of btrfs_super_block::sys_chunk_array.
		 *
		 * While for dev replace, we need to try our best to mark the
		 * block group RO, to prevent a race between:
		 * - Write duplication
		 *   Contains the latest data
		 * - Scrub copy
		 *   Contains data from the commit tree
		 *
		 * If the target block group is not marked RO, nocow writes can
		 * be overwritten by the scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
	if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
			/*
			 * btrfs_inc_block_group_ro returns -ENOSPC when it
			 * fails to create a new chunk for metadata.
			 * It is not a problem for scrub, because
			 * metadata is always COWed, and our scrub pauses
			 * transaction commits.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block group is marked RO, wait for nocow
		 * writes to finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in the commit
		 * tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
					cache->length);
		}

		scrub_pause_off(fs_info);
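		/*
		 * Publish the range being scrubbed in the dev-replace cursor;
		 * item_needs_writeback makes the progress persistent.
		 */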
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache);

		/*
		 * Flush and submit all pending read and write bios, then
		 * wait for them.
		 * Note that in the dev-replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore it is required that all write requests
		 * are flushed, so that all read and write requests have
		 * really completed when bios_in_flight drops to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * Must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_unfreeze_block_group(cache);
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

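/*
 * Scrub all super block copies that fit within the device's
 * commit_total_bytes, then wait for the read bios to complete.
 */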
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

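	/* Super block copies sit at fixed offsets; stop at the device end. */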
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

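/*
 * Drop one reference on the scrub workqueues; the last ref tears them down
 * after unlocking scrub_lock, so the mutex is not held during the destroy.
 */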
static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
					&fs_info->scrub_lock)) {
		struct btrfs_workqueue *scrub_workers = NULL;
		struct btrfs_workqueue *scrub_wr_comp = NULL;
		struct btrfs_workqueue *scrub_parity = NULL;

		scrub_workers = fs_info->scrub_workers;
		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
		scrub_parity = fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
		mutex_unlock(&fs_info->scrub_lock);

		btrfs_destroy_workqueue(scrub_workers);
		btrfs_destroy_workqueue(scrub_wr_comp);
		btrfs_destroy_workqueue(scrub_parity);
	}
}

/*
 * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	struct btrfs_workqueue *scrub_workers = NULL;
	struct btrfs_workqueue *scrub_wr_comp = NULL;
	struct btrfs_workqueue *scrub_parity = NULL;
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;
	int ret = -ENOMEM;

	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
		return 0;

	scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
					      is_dev_replace ? 1 : max_active, 4);
	if (!scrub_workers)
		goto fail_scrub_workers;

	scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
	if (!scrub_wr_comp)
		goto fail_scrub_wr_completion_workers;

	scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					     max_active, 2);
	if (!scrub_parity)
		goto fail_scrub_parity_workers;

	mutex_lock(&fs_info->scrub_lock);
	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL &&
		       fs_info->scrub_wr_completion_workers == NULL &&
		       fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_workers = scrub_workers;
		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
		fs_info->scrub_parity_workers = scrub_parity;
		refcount_set(&fs_info->scrub_workers_refcnt, 1);
		mutex_unlock(&fs_info->scrub_lock);
		return 0;
	}
	/* Other thread raced in and created the workers for us */
	refcount_inc(&fs_info->scrub_workers_refcnt);
	mutex_unlock(&fs_info->scrub_lock);

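	/* We lost the race; drop the queues we allocated but didn't install. */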
	ret = 0;
	btrfs_destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
	btrfs_destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(scrub_workers);
fail_scrub_workers:
	return ret;
}

int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub is unable to calculate the checksum,
		 * given the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
		       fs_info->nodesize,
		       BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
		       fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		goto out_free_ctx;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info,
			"scrub on devid %llu: filesystem on %s is not writable",
				 devid, rcu_str_deref(dev->name));
		ret = -EROFS;
		goto out;
	}

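	/* A replace target or a device not in the FS metadata can't be scrubbed. */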
	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out;
	}
	up_read(&fs_info->dev_replace.rwsem);

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race between
	 * transaction commit and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * Hold the device list mutex to serialize against super
		 * block writes kicked off by a log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_workers_put(fs_info);
	scrub_put_ctx(sctx);

	return ret;
out:
	scrub_workers_put(fs_info);
out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}

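/*
 * Block until every running scrub has reached its pause point, i.e. until
 * scrubs_paused catches up with scrubs_running.
 */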
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

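/* Allow paused scrubs to resume. */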
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

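/*
 * Cancel all running scrubs and wait until they have actually exited.
 */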
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

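/*
 * Cancel the scrub running on one device and wait until its context is
 * detached from the device.
 */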
int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

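/* Copy the live statistics of the scrub running on @devid, if any. */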
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

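/*
 * Map @extent_logical to the physical address, device and mirror number of
 * the first stripe of the chunk mapping, so the extent can be read directly.
 */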
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}