/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data is written back if a good
 * copy can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
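
/*
 * Worked example, assuming the common 4 KiB PAGE_SIZE: one read or write
 * bio carries up to 32 pages * 4 KiB = 128 KiB, and with 64 scrub_bios
 * per scrub context that is 64 * 128 KiB = 8 MiB of I/O in flight per
 * device. With a different PAGE_SIZE the byte values in the comments
 * above scale accordingly.
 */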

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};
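
/*
 * Note on the struct above: one scrub_recover is shared, via the refcount,
 * by the scrub_pages of all mirrors built for one recheck block, so the
 * btrfs_bio mapping stays alive until the last page drops its reference
 * in scrub_put_recover().
 */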

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following flag is used for the parity check of
		 * checksummed data and records whether it was corrected.
		 */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};
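
/*
 * Rough ownership sketch (as suggested by the structures above): a
 * scrub_ctx owns a small pool of scrub_bios; each scrub_bio carries up
 * to SCRUB_PAGES_PER_{RD,WR}_BIO scrub_pages; and each scrub_page also
 * belongs to exactly one scrub_block, the unit on which checksums are
 * verified and repairs are made.
 */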

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	int			stripe_len;

	atomic_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but where an error
	 * happened when the data was read or checked
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
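
/*
 * The dbitmap and ebitmap pointers above are expected to point into the
 * trailing flexible array: the allocation site (outside this excerpt)
 * would reserve sizeof(struct scrub_parity) + 2 * bitmap_size bytes and
 * aim dbitmap at bitmap[0] and ebitmap at the second half.
 */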

struct scrub_wr_ctx {
	struct scrub_bio *wr_curr_bio;
	struct btrfs_device *tgtdev;
	int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t flush_all_writes;
	struct mutex wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t                refs;
};
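
/*
 * The bios[] array above doubles as a free list: sctx->first_free is the
 * index of the first unused scrub_bio, the next_free members chain the
 * remaining ones, and -1 terminates the chain (see scrub_setup_ctx()
 * below, which builds exactly this chain).
 */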

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
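
/*
 * How the pause handshake above fits together: btrfs_scrub_pause() (not
 * shown in this excerpt) raises scrub_pause_req and waits until
 * scrubs_paused matches scrubs_running. scrub_pause_on() advertises this
 * scrub as paused and wakes the waiter; scrub_pause_off() blocks in
 * __scrub_blocked_if_needed() until the pause request is gone before
 * taking the scrub out of the paused state again.
 */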

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The check of the @scrubs_running == @scrubs_paused condition
	 * inside wait_event() is not an atomic operation, which means we
	 * may inc/dec @scrubs_running/@scrubs_paused at any time. Wake up
	 * @scrub_pause_wait as often as we can so that a blocked
	 * transaction commit waits as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->fs_info->nodesize;
	sctx->sectorsize = dev->fs_info->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
				  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  (unsigned long long)swarn->sector,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  (unsigned long long)swarn->sector,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
				"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the time being, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(&sctx->fs_info->dev_replace.
						num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(sctx->fs_info,
		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COWed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		/* try to find no-io-error page in mirrors */
		if (page_bad->io_error) {
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error) by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&sctx->fs_info->dev_replace.
					num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}
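
/*
 * In short, the repair flow above is: (1) re-read the failed mirror page
 * by page, (2) if the errors went away, just count them as unverified,
 * (3) otherwise look for a mirror that is free of I/O and checksum
 * errors and rewrite the whole block from it, and (4) as a last resort
 * stitch together error-free pages from several mirrors and re-verify
 * the checksum of the result.
 */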

static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}
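
/*
 * The counts above reflect the number of distinct ways a block can be
 * reconstructed: for RAID5 there are two "mirrors" (the data stripe
 * itself and a rebuild via the P parity stripe), for RAID6 three (data,
 * P-based and Q-based rebuild); everything else really has num_stripes
 * copies.
 */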

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
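
/*
 * Hypothetical illustration of the RAID56 branch above: for a 3-disk
 * RAID5 full stripe with 64 KiB stripes, raid_map might hold
 * { 0, 65536, RAID5_P_STRIPE }. Assuming a mapped_length that ends at
 * the full-stripe boundary, a logical address of 70656 falls inside the
 * window of the second entry, giving *stripe_index = 1 and
 * *stripe_offset = 70656 - 65536 = 5120; the P entry is skipped.
 */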

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &mapped_length, &bbio, 0, 1);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			return -ENOMEM;
		}

		atomic_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->sblock = sblock;
			page->flags = flags;
			page->generation = generation;
			page->logical = logical;
			page->have_csum = have_csum;
			if (have_csum)
				memcpy(page->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

struct scrub_bio_ret {
	struct completion event;
	int error;
};

static void scrub_bio_wait_endio(struct bio *bio)
{
	struct scrub_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	struct scrub_bio_ret done;
	int ret;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
				    page->recover->map_length,
				    page->mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion(&done.event);
	if (done.error)
		return -EIO;

	return 0;
}
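
/*
 * The helper above turns the asynchronous RAID56 recovery into a
 * synchronous call: scrub_bio_wait_endio() just records bi_error and
 * fires the completion, so the scrub thread can sleep in
 * wait_for_completion() instead of handling the read in an end_io
 * context.
 */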

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
			if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
				sblock->no_io_error_seen = 0;
		} else {
			bio->bi_iter.bi_sector = page->physical >> 9;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);

			if (btrfsic_submit_bio_wait(bio))
				sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(sblock_bad->sctx->fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->fs_info->dev_replace.
				num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
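	/*
	 * Pages are batched here: a page is appended to the current write
	 * bio as long as it stays physically and logically contiguous with
	 * the pages already in it; any discontiguity, a failed
	 * bio_add_page() or a full bio submits the batch and retries the
	 * page from this label.
	 */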
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_KERNEL,
					wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/*
	 * Process all writes in a single worker thread. The block layer
	 * then orders the requests before sending them to the driver,
	 * which doubled the write performance on spinning disks when
	 * measured with Linux 3.5.
	 */
	btrfsic_submit_bio(sbio->bio);
}

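/*
 * Completion callback of a write bio. This may run in atomic context,
 * so only the error state is recorded here; the real cleanup is
 * deferred to scrub_wr_bio_end_io_worker() on the scrub_wr_completion
 * workqueue.
 */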
static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			 scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

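/*
 * Verify one scrub_block, dispatching on the extent flags of its first
 * page: data extents, tree blocks and superblocks each carry their
 * checksum differently. A non-zero return value means the block is bad
 * and repair is attempted via scrub_handle_errored_block().
 */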
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently, because this
	 * function only uses the return value instead of the stats.
	 *
	 * Todo:
	 * always use stats
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

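/*
 * Check a data block: crc32c over one sectorsize'd block, walking the
 * pagev[] array in case the block spans several pages, compared against
 * the csum previously looked up from the csum tree.
 */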
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->checksum_error;
}

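/*
 * Check a metadata block: bytenr, generation, fsid and chunk tree uuid
 * are validated from the header on the kmapped first page, then the
 * checksum is computed over the whole node except its first
 * BTRFS_CSUM_SIZE bytes, which hold the on-disk csum itself.
 */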
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->fs_info->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}

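/*
 * Read-side counterpart of scrub_add_page_to_wr_bio(): take (or wait
 * for) a free scrub_bio and batch pages into it while the physical and
 * logical addresses and the device stay contiguous. Each page in flight
 * holds an extra reference on its sblock so the block outlives the bio
 * completion.
 */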
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_KERNEL,
					sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_error)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(sctx->fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(sctx->fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	scrub_block_put(sblock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

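/*
 * The device holding this block is missing, which is only expected for
 * RAID 5/6 during device replace: read the block by rebuilding it from
 * the remaining stripes via a "missing" rbio and finish the checksum
 * verification in scrub_missing_raid56_worker().
 */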
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			&length, &bbio, 0, 1);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(dev_root, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
			scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

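/*
 * Build a scrub_block for [logical, logical + len), one scrub_page per
 * PAGE_SIZE chunk, and queue all of its pages for reading. "force"
 * flushes the current read bio even if it is not full yet.
 */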
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (dev->missing) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

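/*
 * Mark the sectors covered by [start, start + len) in a per-stripe
 * bitmap. "start" is reduced modulo stripe_len, so a range extending
 * past the end of the stripe wraps around to the first bits.
 */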
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u32 offset;
	int nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div_u64_rem(start, sparity->stripe_len, &offset);
	offset /= sectorsize;
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev-replace case: if the block has a checksum
		 * error, it is written back via the repair mechanism;
		 * otherwise it is written out here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

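/*
 * Look up the csum of the sector at "logical" in sctx->csum_list,
 * discarding entries that end before it (the list is sorted by bytenr).
 * Returns 1 and copies the csum on success, 0 if none is known.
 */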
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						      physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (dev->missing) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
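/*
 * Illustrative example (the numbers are assumed, not taken from the
 * code): RAID5 over 3 devices with a 64K stripe_len has 2 data stripes
 * per row. For num == 0 and a physical address 64K into the device
 * extent (row 1), last_offset = 2 * 64K = 128K. Row 1 rotates the
 * parity onto device 0, so the loop finds no data stripe of that row
 * on device 0, returns 1 and sets *offset to 128K, the logical start
 * of the row's leftmost data stripe.
 */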
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;

	if (bio->bi_error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(sparity->sctx->fs_info->scrub_parity_workers,
			 &sparity->work);
}

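/*
 * Called once the last reference on the scrub_parity is dropped: the
 * sectors that still need their parity checked are dbitmap minus the
 * unrepairable sectors in ebitmap. A scrub rbio is built over the full
 * stripe to verify (and if needed rewrite) the parity of those sectors.
 */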
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_root *dev_root = sctx->fs_info->dev_root;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct scrub_page *spage;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;
	ret = btrfs_map_sblock(sctx->fs_info, BTRFS_MAP_WRITE,
			       sparity->logic_start,
			       &length, &bbio, 0, 1);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(dev_root, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	list_for_each_entry(spage, &sparity->spages, list)
		raid56_add_scrub_pages(rbio, spage->page, spage->logical);

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	atomic_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!atomic_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

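/*
 * Scrub one full RAID 5/6 stripe: walk the extent tree for the data
 * stripes in [logic_start, logic_end), mark the covered sectors in
 * dbitmap, read and verify them, and finally let
 * scrub_parity_check_and_repair() validate the parity itself.
 */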
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = div_u64(map->stripe_len, root->fs_info->sectorsize);
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	atomic_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					extent_logical, &mapped_length, &bbio,
					0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}

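/*
 * Scrub all stripes of one device extent. "offset" is the logical
 * distance from the chunk start to this device's first stripe and
 * "increment" the logical distance between two consecutive stripes on
 * the same device. For illustration (values assumed): RAID0 over 4
 * devices with 64K stripes advances the logical position by 256K for
 * every 64K of physical space scrubbed.
 */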
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for the extent tree and csum tree and wait
	 * for completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is a parity stripe */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
				       key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}

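/*
 * Map a chunk to its stripes and scrub those that live on scrub_dev at
 * dev_offset. A missing extent map is tolerated only when the block
 * group has been removed concurrently.
 */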
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group_cache *cache,
					  int is_dev_replace)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

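/*
 * Walk all dev extent items of scrub_dev in [start, end), look up the
 * corresponding block group, set it read-only and scrub the chunk. The
 * pause/RO dance below avoids deadlocking against transaction commits.
 */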
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->fs_info->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

3517
	path->reada = READA_FORWARD;
A
Arne Jansen 已提交
3518 3519 3520
	path->search_commit_root = 1;
	path->skip_locking = 1;

3521
	key.objectid = scrub_dev->devid;
A
Arne Jansen 已提交
3522 3523 3524 3525 3526 3527
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet;
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		/*
		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(root, cache);
		if (!ret && is_dev_replace) {
			/*
			 * If we are doing a device replace wait for any tasks
			 * that started delalloc right before we set the block
			 * group to RO mode, as they might have just allocated
			 * an extent from it or decided they could do a nocow
			 * write. And if any such tasks did that, wait for their
			 * ordered extents to complete and then commit the
			 * current transaction, so that we can later see the new
			 * extent items in the extent tree - the ordered extents
			 * create delayed data references (for cow writes) when
			 * they complete, which will be run and insert the
			 * corresponding extent items into the extent tree when
			 * we commit the transaction they used when running
			 * inode.c:btrfs_finish_ordered_io(). We later use
			 * the commit root of the extent tree to find extents
			 * to copy from the srcdev into the tgtdev, and we don't
			 * want to miss any new extents.
			 */
			btrfs_wait_block_group_reservations(cache);
			btrfs_wait_nocow_writers(cache);
			ret = btrfs_wait_ordered_roots(fs_info, -1,
						       cache->key.objectid,
						       cache->key.offset);
			if (ret > 0) {
				struct btrfs_trans_handle *trans;

				trans = btrfs_join_transaction(root);
				if (IS_ERR(trans))
					ret = PTR_ERR(trans);
				else
					ret = btrfs_commit_transaction(trans,
								       root);
				if (ret) {
					scrub_pause_off(fs_info);
					btrfs_put_block_group(cache);
					break;
				}
			}
		}
		scrub_pause_off(fs_info);

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * fails to create a new chunk for metadata.
			 * That is not a problem for scrub/replace, because
			 * metadata is always COWed, and our scrub pauses
			 * transaction commits.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro, ret=%d",
				   ret);
			btrfs_put_block_group(cache);
			break;
		}

		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache, is_dev_replace);

		/*
		 * Flush and submit all pending read and write bios, then
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * Must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);

		scrub_pause_off(fs_info);

		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);

		if (ro_set)
			btrfs_dec_block_group_ro(root, cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    btrfs_block_group_used(&cache->item) == 0) {
			spin_unlock(&cache->lock);
			spin_lock(&fs_info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &fs_info->unused_bgs);
			}
			spin_unlock(&fs_info->unused_bgs_lock);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

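/*
 * Scrub each super block copy that fits within the committed size of
 * @scrub_dev, then wait until all of those reads have completed.
 */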
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_root *root = sctx->fs_info->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != root->fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers; start workers if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers)
			goto fail_scrub_nocow_workers;
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
fail_scrub_nocow_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

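/*
 * drop a reference on fs_info->scrub_workers; destroy the workqueues when
 * the last reference is dropped
 */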
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

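/*
 * Entry point for scrub and device replace: validate the size assumptions,
 * set up a scrub context for @devid and scrub the device range
 * [start, end).
 */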
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * The way scrub is implemented, it is unable to calculate
		 * the checksum in this case. Do not handle this situation
		 * at all because it should never happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
		       fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly && !dev->writeable) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race
	 * between transaction commit and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * By holding the device list mutex, we avoid racing with
		 * the super block writes kicked off by a log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

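/*
 * Ask all running scrubs to pause and wait until each of them has reached
 * its pause point. Used to keep scrub quiescent across a transaction
 * commit.
 */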
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

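/*
 * Cancel a running scrub on @dev and wait until it has actually stopped.
 */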
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

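/*
 * Copy the current scrub statistics for @devid into @progress. Returns
 * -ENODEV if the device is unknown and -ENOTCONN if no scrub is running
 * on it.
 */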
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

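/*
 * Map @extent_logical to the physical offset, device and mirror number of
 * the first stripe that backs it.
 */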
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}

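/*
 * Initialize the write context. Only dev replace sets up a target device;
 * for a plain scrub there is nothing more to do.
 */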
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}

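/*
 * Queue a worker that copies the pages of a nocow extent through the page
 * cache to the dev replace target device.
 */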
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

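/*
 * Callback for iterate_inodes_from_logical(): record each inode that
 * references the nocow extent so the worker can process them later.
 */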
static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1

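/*
 * Worker: collect all inodes that reference the nocow extent and copy the
 * extent's pages for each of them in turn.
 */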
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info,
			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
			   logical, physical_for_dev_replace, len, mirror_num,
			   ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans, root);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

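/*
 * Check that the extent at @start still maps to @logical and that no
 * ordered extent is pending on the range. Returns 0 if the copy may
 * proceed, 1 if the range should be skipped and a negative errno on error.
 */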
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}

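/*
 * Copy one inode's reference to the nocow extent: read each page through
 * the page cache and write it to the corresponding physical offset on the
 * dev replace target device.
 */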
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	inode_lock(inode);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_SIZE) {
		index = offset >> PAGE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
							   btrfs_get_extent,
							   nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data in it is meaningless: it may be stale,
			 * because new data may have been written into a new
			 * page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(inode, offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		put_page(page);

		if (ret)
			break;

		offset += PAGE_SIZE;
		physical_for_dev_replace += PAGE_SIZE;
		nocow_ctx_logical += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	inode_unlock(inode);
	iput(inode);
	return ret;
}

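/*
 * Synchronously write a single page to @physical_for_dev_replace on the
 * dev replace target device.
 */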
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		btrfs_warn_rl(dev->fs_info,
			"scrub write_page_nocow(bdev == NULL) is unexpected");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}