/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

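/*
 * one scrub_page is allocated per page of a block that is checked: it
 * records where the data lives (logical/physical, device, mirror number)
 * and the result of the last read (have_csum, io_error).
 */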
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

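/*
 * a scrub_block groups the scrub_pages that form one verification unit
 * (a data sector or a tree block); the error bits are filled in by the
 * recheck code.
 */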
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

struct scrub_wr_ctx {
	struct scrub_bio *wr_curr_bio;
	struct btrfs_device *tgtdev;
	int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t flush_all_writes;
	struct mutex wr_lock;
};

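/*
 * one scrub_ctx exists per scrubbed device. It owns the fixed pool of
 * read bios, the list of checksums expected in the currently scrubbed
 * range and the statistics reported back to user space.
 */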
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);


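/*
 * scrub_pending_bio_inc/_dec account for the read/write bios in flight.
 * Every decrement wakes list_wait, so callers can wait for all bios to
 * drain with e.g.:
 *	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
 */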
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

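/*
 * pause handling: __scrub_blocked_if_needed() drops scrub_lock and sleeps
 * until scrub_pause_req falls back to zero. scrub_blocked_if_needed()
 * additionally marks this scrub as paused (and wakes the pause requester)
 * for the duration of the wait.
 */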
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transactions commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all matters practical. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * the @scrubs_running == @scrubs_paused condition tested inside
	 * wait_event() is not checked atomically: @scrubs_running and
	 * @scrubs_paused may be incremented or decremented at any time.
	 * Wake up @scrub_pause_wait as often as we can so that a blocked
	 * transaction commit is delayed as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

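/*
 * allocate and initialize the per-device scrub context: the pool of
 * SCRUB_BIOS_PER_SCTX read bios is linked into a free list via next_free,
 * and the write context used by the dev-replace path is set up last.
 */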
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

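/*
 * print a warning about a corrupted block: for metadata the tree backrefs
 * are resolved, for data extents all referencing inodes are iterated and
 * the affected file paths are printed via scrub_print_warning_inode().
 */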
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			printk_in_rcu(KERN_WARNING
				"BTRFS: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

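/*
 * callback for iterate_inodes_from_logical(): if a clean, up-to-date page
 * is already cached, the bad copy is rewritten directly via
 * repair_io_failure(); otherwise the page is read through the page cache
 * from the bad mirror so that the generic read-repair path fixes it up.
 */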
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defective sector.
			 * the data that was in that sector is not in memory,
			 * because the page was modified. we must not write
			 * the modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the time being, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent. so make iterate_extent_inodes
		 * stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copy number (mirror) of the failed sector. thus, that
	 * readpage is expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * why this fixup code is called), this time page by page, in
	 * order to know which pages caused I/O errors and which ones are
	 * good (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the pages from those mirrors
	 * without I/O error on the particular pages. One example (with
	 * blocks >= 2 * PAGE_SIZE) would be that mirror #1 has an I/O
	 * error on the first page, the second page is good, and mirror #2
	 * has an I/O error on the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the second page
	 * of the second mirror can be repaired by copying the contents of
	 * the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O errors,
	 * the checksum cannot be verified. In order to get the best data
	 * for repairing, the first attempt is to find a mirror without
	 * I/O errors and with a validated checksum. Only if this is not
	 * possible, the pages are picked from mirrors with I/O errors
	 * without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain the
	 * statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				     sizeof(*sblocks_for_recheck),
				     GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
		WARN_ON(sctx->is_dev_replace);

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&sctx->dev_root->
							fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from. scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}

	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"BTRFS: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
				      &mapped_length, &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_iter.bi_sector = page->physical >> 9;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (btrfsic_submit_bio_wait(READ, bio))
			sblock->no_io_error_seen = 0;

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return;
}

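/*
 * recompute the checksum over all pages of the block and compare it to the
 * expected value; for metadata, the bytenr, fsid, chunk tree uuid and
 * generation stored in the header are validated as well.
 */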
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != btrfs_stack_header_generation(h)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

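/*
 * rewrite the bad mirror from the good one: with force_write every page is
 * written back, otherwise only when the block has header/checksum errors
 * or the individual page saw an I/O error.
 */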
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			printk_ratelimited(KERN_WARNING "BTRFS: "
				"scrub_repair_page_from_good_copy(bdev == NULL) "
				"is unexpected!\n");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(WRITE, bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

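/*
 * dev-replace write path: pages are collected in wr_curr_bio until the bio
 * is full or becomes non-contiguous and is then submitted to the target
 * device. Pages that could not be read from any mirror are written out as
 * zeros.
 */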
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_NOFS);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(WRITE, sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			 scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->dev_root->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

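/*
 * dispatch checksum verification by extent type; a non-zero return value
 * means the block was bad and scrub_handle_errored_block() has already
 * been called for it.
 */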
static int scrub_checksum(struct scrub_block *sblock)
1653 1654 1655 1656
{
	u64 flags;
	int ret;

1657 1658
	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		++fail;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	WARN_ON(sctx->nodesize != sctx->leafsize);
	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * It will get rewritten with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->ref_count);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->ref_count)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);

	if (!sbio->bio->bi_bdev) {
		/*
		 * this case should not happen. If btrfs_map_block() is
		 * wrong, it could happen for dev-replace operations on
		 * missing devices when no mirrors are available, but in
		 * this case it should already fail the mount.
		 * This case is handled correctly (but _very_ slowly).
		 */
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
		bio_endio(sbio->bio, -EIO);
	} else {
		btrfsic_submit_bio(READ, sbio->bio);
	}
}

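/*
 * Queue one page into the current read bio. A fresh bio is taken from the
 * free list (waiting if none is available); the bio is submitted early
 * whenever the new page is not physically and logically contiguous with
 * the pages already collected, or targets a different device.
 */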
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

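/*
 * Split the range [logical, logical + len) into PAGE_SIZE pieces, collect
 * them in a freshly allocated scrub_block and queue every page for reading
 * via scrub_add_page_to_rd_bio().
 */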
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen) {
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev-replace case, a block with a checksum error
		 * is written through the repair machinery; an error-free
		 * block is written to the target device here.
		 */
		if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}
}

static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		WARN_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						      physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the left-most data stripe's logical offset.
 *
 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
 */
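/*
 * Worked example with a hypothetical layout: RAID5, num_stripes = 3,
 * nr_data_stripes = 2, stripe_len = 64K. For num = 2 and a physical
 * address at the start of that device extent, last_offset = 0. In full
 * stripe 0 the rotation is rot = 0, so i = 0 maps to stripe_index 0 and
 * i = 1 maps to stripe_index 1: device 2 holds parity for this row. Both
 * candidates have stripe_index < num, hence j = 2 and
 * *offset = 2 * 64K = 128K, the logical start of the next full stripe,
 * and the function returns 1.
 */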
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	int stripe_index;
	int rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = *offset;
		do_div(stripe_nr, map->stripe_len);
		do_div(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		rot = do_div(stripe_nr, map->num_stripes);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

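/*
 * Scrub one device stripe of a chunk: walk the commit-root extent tree
 * across the stripe's logical range and feed every extent that maps to
 * this device into scrub_extent(), pausing and flushing queued bios
 * whenever a transaction commit asks for it.
 */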
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

2319
	physical = map->stripes[num].physical;
A
Arne Jansen 已提交
2320 2321 2322 2323 2324
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
2325
		mirror_num = 1;
A
Arne Jansen 已提交
2326 2327 2328 2329
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
2330
		mirror_num = num % map->sub_stripes + 1;
A
Arne Jansen 已提交
2331 2332
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
2333
		mirror_num = num % map->num_stripes + 1;
A
Arne Jansen 已提交
2334 2335
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
2336
		mirror_num = num % map->num_stripes + 1;
2337 2338 2339 2340 2341
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
		get_raid56_logic_offset(physical, num, map, &offset);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
A
Arne Jansen 已提交
2342 2343
	} else {
		increment = map->stripe_len;
2344
		mirror_num = 1;
A
Arne Jansen 已提交
2345 2346 2347 2348 2349 2350
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6)) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
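	/*
	 * E.g. with 4-byte crc32 csums and 4K sectors, the csums for
	 * 1 GiB of data come to 1 MiB.
	 */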
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/* for raid56, we skip parity stripe */
		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
			ret = get_raid56_logic_offset(physical, num,
					map, &logical);
			logical += base;
			if (ret)
				goto skip;
		}
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			scrub_blocked_if_needed(fs_info);
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->leafsize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				btrfs_err(fs_info,
					   "scrub: tree block %llu spanning "
					   "stripes, ignored. logical=%llu",
				       key.objectid, logical);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root, logical,
						logical + map->stripe_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);
			if (ret)
				goto out;

			scrub_free_csums(sctx);
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
					BTRFS_BLOCK_GROUP_RAID6)) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
					do {
						physical += map->stripe_len;
						ret = get_raid56_logic_offset(
								physical, num,
								map, &logical);
						logical += base;
					} while (physical < physical_end && ret);
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

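/*
 * Walk all dev extents of the device under scrub and scrub each chunk the
 * device participates in. Between chunks, all pending bios are flushed and
 * the scrub counts itself as paused so that transaction commits are not
 * held off.
 */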
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/* some chunks are removed but not committed to disk yet,
		 * continue scrubbing */
		if (!cache)
			goto skip;

		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);

		mutex_lock(&fs_info->scrub_lock);
		__scrub_blocked_if_needed(fs_info);
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}

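/*
 * Read and verify all super block copies on this device. Errors are only
 * reported; the supers get rewritten with the next transaction commit
 * anyway.
 */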
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;
	int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers) {
			ret = -ENOMEM;
			goto out;
		}
	}
	++fs_info->scrub_workers_refcnt;
out:
	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize == leafsize (%d == %d) fails",
		       fs_info->chunk_root->nodesize,
		       fs_info->chunk_root->leafsize);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err(fs_info,
			   "scrub: size assumption sectorsize != PAGE_SIZE "
			   "(%d != %lu) fails",
		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
			   "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly && !dev->writeable) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * by checking @scrub_pause_req here, we can avoid a race
	 * between committing a transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding the device list mutex, we can
		 * kick off writing supers in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);

	return ret;
}

void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}

static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}

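/*
 * Dev-replace path for data extents without checksums: instead of the
 * read/verify/write cycle, the pages are copied to the target device
 * through the page cache by a worker on the scrub_nocow workqueue.
 */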
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1

static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
			"phys %llu, len %llu, mir %u, ret %d",
			logical, physical_for_dev_replace, len, mirror_num,
			ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans, root);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

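/*
 * Copy one inode's view of the nocow range: look up the inode in the given
 * root, lock the extent range, check that the extent still covers the
 * logical range, then write each page (reading it into the page cache
 * first if necessary) to the dev-replace target.
 */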
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct btrfs_ordered_extent *ordered;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 len = nocow_ctx->len;
	u64 lockstart = offset, lockend = offset + len - 1;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > nocow_ctx->logical ||
	    em->block_start + em->block_len < nocow_ctx->logical + len) {
		free_extent_map(em);
		goto out_unlock;
	}
	free_extent_map(em);

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page_nolock(io_tree, page,
							   btrfs_get_extent,
							   nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless, because it may be
			 * an old one; the new data may be written into a new
			 * page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}
		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}

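/*
 * Synchronously write a single page to the dev-replace target device at
 * the given physical offset.
 */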
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}