// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STRATO AG 2012.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
#include "block-group.h"

/*
 * Device replace overview
 *
 * [Objective]
 * To copy all extents (both new and on-disk) from source device to target
 * device, while still keeping the filesystem read-write.
 *
 * [Method]
 * There are two main methods involved:
 *
 * - Write duplication
 *
 *   All new writes will be written to both target and source devices, so even
 *   if replace gets canceled, the source device still contains up-to-date data.
 *
 *   Location:		handle_ops_on_dev_replace() from __btrfs_map_block()
 *   Start:		btrfs_dev_replace_start()
 *   End:		btrfs_dev_replace_finishing()
 *   Content:		Latest data/metadata
 *
 * - Copy existing extents
 *
 *   This happens by re-using scrub facility, as scrub also iterates through
 *   existing extents from commit root.
 *
 *   Location:		scrub_write_block_to_dev_replace() from
 *   			scrub_block_complete()
 *   Content:		Data/meta from commit root.
 *
 * Due to the content difference, we need to avoid nocow write when dev-replace
 * is happening.  This is done by marking the block group read-only and waiting
 * for NOCOW writes.
 *
 * After replace is done, the finishing part is done by swapping the target and
 * source devices.
 *
 *   Location:		btrfs_dev_replace_update_device_in_mapping_tree() from
 *   			btrfs_dev_replace_finishing()
 */
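
/*
 * Locking: dev_replace->rwsem protects the replace state, cursors and the
 * source/target device pointers; lock_finishing_cancel_unmount serializes
 * the finishing procedure against cancel and unmount; the bio_counter and
 * replace_wait pair lets the finishing code drain in-flight bios before a
 * device is swapped out.
 */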

static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret);
static int btrfs_dev_replace_kthread(void *data);

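/*
 * Read the dev-replace item from the device tree at mount time and set up
 * fs_info->dev_replace accordingly, restoring the state of a replace that
 * was started or suspended before the last unmount.
 */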
int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_path *path = NULL;
	int item_size;
	struct btrfs_dev_replace_item *ptr;
	u64 src_devid;

	if (!dev_root)
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
	if (ret) {
no_valid_dev_replace_entry_found:
		/*
		 * We don't have a replace item or it's corrupted.  If there is
		 * a replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices,
				      BTRFS_DEV_REPLACE_DEVID, NULL, NULL)) {
			btrfs_err(fs_info,
			"found replace target device without a valid replace item");
			ret = -EUCLEAN;
			goto out;
		}
		ret = 0;
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->cont_reading_from_srcdev_mode =
		    BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
		dev_replace->time_started = 0;
		dev_replace->time_stopped = 0;
		atomic64_set(&dev_replace->num_write_errors, 0);
		atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
		dev_replace->cursor_left = 0;
		dev_replace->committed_cursor_left = 0;
		dev_replace->cursor_left_last_write_of_item = 0;
		dev_replace->cursor_right = 0;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		dev_replace->is_valid = 0;
		dev_replace->item_needs_writeback = 0;
		goto out;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);

	if (item_size != sizeof(struct btrfs_dev_replace_item)) {
		btrfs_warn(fs_info,
			"dev_replace entry found has unexpected size, ignore entry");
		goto no_valid_dev_replace_entry_found;
	}

	src_devid = btrfs_dev_replace_src_devid(eb, ptr);
	dev_replace->cont_reading_from_srcdev_mode =
		btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
	dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
	dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
	dev_replace->time_stopped =
		btrfs_dev_replace_time_stopped(eb, ptr);
	atomic64_set(&dev_replace->num_write_errors,
		     btrfs_dev_replace_num_write_errors(eb, ptr));
	atomic64_set(&dev_replace->num_uncorrectable_read_errors,
		     btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
	dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
	dev_replace->committed_cursor_left = dev_replace->cursor_left;
	dev_replace->cursor_left_last_write_of_item = dev_replace->cursor_left;
	dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
	dev_replace->is_valid = 1;

	dev_replace->item_needs_writeback = 0;
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		/*
		 * We don't have an active replace item but if there is a
		 * replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices,
				      BTRFS_DEV_REPLACE_DEVID, NULL, NULL)) {
			btrfs_err(fs_info,
			"replace devid present without an active replace item");
			ret = -EUCLEAN;
		} else {
			dev_replace->srcdev = NULL;
			dev_replace->tgtdev = NULL;
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices,
						src_devid, NULL, NULL);
		dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices,
							BTRFS_DEV_REPLACE_DEVID,
							NULL, NULL);
		/*
		 * allow 'btrfs dev replace_cancel' if src/tgt device is
		 * missing
		 */
		if (!dev_replace->srcdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
			   "cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
			   "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
			   src_devid);
		}
		if (!dev_replace->tgtdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
			   "cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
			   "tgtdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
				BTRFS_DEV_REPLACE_DEVID);
		}
		if (dev_replace->tgtdev) {
			if (dev_replace->srcdev) {
				dev_replace->tgtdev->total_bytes =
					dev_replace->srcdev->total_bytes;
				dev_replace->tgtdev->disk_total_bytes =
					dev_replace->srcdev->disk_total_bytes;
				dev_replace->tgtdev->commit_total_bytes =
					dev_replace->srcdev->commit_total_bytes;
				dev_replace->tgtdev->bytes_used =
					dev_replace->srcdev->bytes_used;
				dev_replace->tgtdev->commit_bytes_used =
					dev_replace->srcdev->commit_bytes_used;
			}
			set_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				&dev_replace->tgtdev->dev_state);

			WARN_ON(fs_info->fs_devices->rw_devices == 0);
			dev_replace->tgtdev->io_width = fs_info->sectorsize;
			dev_replace->tgtdev->io_align = fs_info->sectorsize;
			dev_replace->tgtdev->sector_size = fs_info->sectorsize;
			dev_replace->tgtdev->fs_info = fs_info;
			set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&dev_replace->tgtdev->dev_state);
		}
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Initialize a new device for device replace target from a given source dev
 * and path.
 *
 * Return 0 and new device in @device_out, otherwise return < 0
 */
static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				  const char *device_path,
				  struct btrfs_device *srcdev,
				  struct btrfs_device **device_out)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct rcu_string *name;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (srcdev->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev);
	}

	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
		btrfs_err(fs_info,
		"dev-replace: zoned type of target device mismatch with filesystem");
		ret = -EINVAL;
		goto error;
	}

	sync_blockdev(bdev);

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info,
				  "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (i_size_read(bdev->bd_inode) <
	    btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info,
			  "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		btrfs_free_device(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = 0;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
	device->fs_devices = fs_info->fs_devices;

	ret = btrfs_get_dev_zone_info(device);
	if (ret)
		goto error;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return 0;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}

/*
 * called from commit_transaction. Writes changed device replace state to
 * disk.
 */
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_replace_item *ptr;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	if (!dev_replace->is_valid ||
	    !dev_replace->item_needs_writeback) {
		up_read(&dev_replace->rwsem);
		return 0;
	}
	up_read(&dev_replace->rwsem);

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn(fs_info,
			   "error %d while searching for dev_replace item!",
			   ret);
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/*
		 * need to delete old one and insert a new one.
		 * Since no attempt is made to recover any old state, if the
		 * dev_replace state is 'running', the data on the target
		 * drive is lost.
		 * It would be possible to recover the state: just make sure
		 * that the beginning of the item is never changed and always
		 * contains all the essential information. Then read this
		 * minimal set of information and use it as a base for the
		 * new state.
		 */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn(fs_info,
				   "delete too small dev_replace item failed %d!",
				   ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn(fs_info,
				   "insert dev_replace item failed %d!", ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0],
			     struct btrfs_dev_replace_item);

	down_write(&dev_replace->rwsem);
	if (dev_replace->srcdev)
		btrfs_set_dev_replace_src_devid(eb, ptr,
			dev_replace->srcdev->devid);
	else
		btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
	btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
		dev_replace->cont_reading_from_srcdev_mode);
	btrfs_set_dev_replace_replace_state(eb, ptr,
		dev_replace->replace_state);
	btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
	btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
	btrfs_set_dev_replace_num_write_errors(eb, ptr,
		atomic64_read(&dev_replace->num_write_errors));
	btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
		atomic64_read(&dev_replace->num_uncorrectable_read_errors));
	dev_replace->cursor_left_last_write_of_item =
		dev_replace->cursor_left;
	btrfs_set_dev_replace_cursor_left(eb, ptr,
		dev_replace->cursor_left_last_write_of_item);
	btrfs_set_dev_replace_cursor_right(eb, ptr,
		dev_replace->cursor_right);
	dev_replace->item_needs_writeback = 0;
	up_write(&dev_replace->rwsem);

	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);

	return ret;
}

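/* Device name for messages, with a fallback for missing devices. */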
static char *btrfs_dev_name(struct btrfs_device *device)
{
	if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		return "<missing disk>";
	else
		return rcu_str_deref(device->name);
}

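/*
 * On zoned filesystems, mark every block group that has a dev extent on the
 * source device with the "to_copy" flag so that the copy stage knows which
 * block groups still need to be handled.
 */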
static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
				    struct btrfs_device *src_dev)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_block_group *cache;
	struct btrfs_trans_handle *trans;
	int ret = 0;
	u64 chunk_offset;

	/* Do not use "to_copy" on non zoned filesystem for now */
	if (!btrfs_is_zoned(fs_info))
		return 0;

	mutex_lock(&fs_info->chunk_mutex);

	/* Ensure we don't have pending new block group */
	spin_lock(&fs_info->trans_lock);
	while (fs_info->running_transaction &&
	       !list_empty(&fs_info->running_transaction->dev_update_list)) {
		spin_unlock(&fs_info->trans_lock);
		mutex_unlock(&fs_info->chunk_mutex);
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			mutex_lock(&fs_info->chunk_mutex);
			if (ret == -ENOENT) {
				spin_lock(&fs_info->trans_lock);
				continue;
			} else {
				goto unlock;
			}
		}

		ret = btrfs_commit_transaction(trans);
		mutex_lock(&fs_info->chunk_mutex);
		if (ret)
			goto unlock;

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto unlock;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = src_dev->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto free_path;
	if (ret > 0) {
		if (path->slots[0] >=
		    btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto free_path;
			if (ret > 0) {
				ret = 0;
				goto free_path;
			}
		} else {
			ret = 0;
		}
	}

	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != src_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);

		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);

		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache)
			goto skip;

		spin_lock(&cache->lock);
		cache->to_copy = 1;
		spin_unlock(&cache->lock);

		btrfs_put_block_group(cache);

skip:
		ret = btrfs_next_item(root, path);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			break;
		}
	}

free_path:
	btrfs_free_path(path);
unlock:
	mutex_unlock(&fs_info->chunk_mutex);

	return ret;
}

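/*
 * Called after a device extent of @cache on @srcdev has been copied.  Clear
 * the "to_copy" flag and return true once no further stripes of this block
 * group remain to be copied from the source device, false otherwise.
 */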
bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
				      struct btrfs_block_group *cache,
				      u64 physical)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	u64 chunk_offset = cache->start;
	int num_extents, cur_extent;
	int i;

	/* Do not use "to_copy" on non zoned filesystem for now */
	if (!btrfs_is_zoned(fs_info))
		return true;

	spin_lock(&cache->lock);
	if (cache->removed) {
		spin_unlock(&cache->lock);
		return true;
	}
	spin_unlock(&cache->lock);

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(!IS_ERR(em));
	map = em->map_lookup;

	num_extents = cur_extent = 0;
	for (i = 0; i < map->num_stripes; i++) {
		/* We have more device extents to copy */
		if (srcdev != map->stripes[i].dev)
			continue;

		num_extents++;
		if (physical == map->stripes[i].physical)
			cur_extent = i;
	}

	free_extent_map(em);

	if (num_extents > 1 && cur_extent < num_extents - 1) {
		/*
		 * Has more stripes on this device. Keep this block group
		 * readonly until we finish all the stripes.
		 */
		return false;
	}

	/* Last stripe on this device */
	spin_lock(&cache->lock);
	cache->to_copy = 0;
	spin_unlock(&cache->lock);

	return true;
}

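/*
 * Start a replace operation: validate the source device, prepare the target
 * device, switch the state to STARTED so that new writes are duplicated, and
 * copy the existing extents by running scrub against the source device.
 */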
static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
		const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
		int read_src)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int ret;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;

	src_device = btrfs_find_device_by_devspec(fs_info, srcdevid,
						  srcdev_name);
	if (IS_ERR(src_device))
		return PTR_ERR(src_device);

	if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot replace device %s (devid %llu) due to active swapfile",
			btrfs_dev_name(src_device), src_device->devid);
		return -ETXTBSY;
	}

	/*
	 * Here we commit the transaction to make sure commit_total_bytes
	 * of all the devices are updated.
	 */
	trans = btrfs_attach_transaction(root);
	if (!IS_ERR(trans)) {
		ret = btrfs_commit_transaction(trans);
		if (ret)
			return ret;
	} else if (PTR_ERR(trans) != -ENOENT) {
		return PTR_ERR(trans);
	}

	ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
					    src_device, &tgt_device);
	if (ret)
		return ret;

	ret = mark_block_group_to_copy(fs_info, src_device);
	if (ret)
		return ret;

	down_write(&dev_replace->rwsem);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ASSERT(0);
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	dev_replace->cont_reading_from_srcdev_mode = read_src;
	dev_replace->srcdev = src_device;
	dev_replace->tgtdev = tgt_device;

	btrfs_info_in_rcu(fs_info,
		      "dev_replace from %s (devid %llu) to %s started",
		      btrfs_dev_name(src_device),
		      src_device->devid,
		      rcu_str_deref(tgt_device->name));

	/*
	 * from now on, the writes to the srcdev are all duplicated to
	 * go to the tgtdev as well (refer to btrfs_map_block()).
	 */
	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
	dev_replace->time_started = ktime_get_real_seconds();
	dev_replace->cursor_left = 0;
	dev_replace->committed_cursor_left = 0;
	dev_replace->cursor_left_last_write_of_item = 0;
	dev_replace->cursor_right = 0;
	dev_replace->is_valid = 1;
	dev_replace->item_needs_writeback = 1;
	atomic64_set(&dev_replace->num_write_errors, 0);
	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
	up_write(&dev_replace->rwsem);

	ret = btrfs_sysfs_add_device(tgt_device);
	if (ret)
		btrfs_err(fs_info, "kobj add dev failed %d", ret);

	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);

	/* Commit dev_replace state and reserve 1 item for it. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	ret = btrfs_commit_transaction(trans);
	WARN_ON(ret);

	/* the disk copy procedure reuses the scrub code */
	ret = btrfs_scrub_dev(fs_info, src_device->devid, 0,
			      btrfs_device_get_total_bytes(src_device),
			      &dev_replace->scrub_progress, 0, 1);

	ret = btrfs_dev_replace_finishing(fs_info, ret);
	if (ret == -EINPROGRESS)
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;

	return ret;

leave:
	btrfs_destroy_dev_replace_tgtdev(tgt_device);
	return ret;
}

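/* ioctl entry point: validate the arguments and translate the result code. */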
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
			    struct btrfs_ioctl_dev_replace_args *args)
{
	int ret;

	switch (args->start.cont_reading_from_srcdev_mode) {
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
		break;
	default:
		return -EINVAL;
	}

	if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
	    args->start.tgtdev_name[0] == '\0')
		return -EINVAL;

	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
					args->start.srcdevid,
					args->start.srcdev_name,
					args->start.cont_reading_from_srcdev_mode);
	args->result = ret;
	/* don't warn if EINPROGRESS, someone else might be running scrub */
	if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS ||
	    ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR)
		return 0;

	return ret;
}

/*
 * Set the replacing state so that new bios are held back and wait until all
 * in-flight bio operations are finished.
 */
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wait_event(fs_info->dev_replace.replace_wait, !percpu_counter_sum(
		   &fs_info->dev_replace.bio_counter));
}

/*
 * we have removed target device, it is safe to allow new bios request.
 */
static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wake_up(&fs_info->dev_replace.replace_wait);
}

/*
 * When finishing the device replace, before swapping the source device with the
 * target device we must update the chunk allocation state in the target device,
 * as it is empty because replace works by directly copying the chunks and not
 * through the normal chunk allocation path.
 */
static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
					struct btrfs_device *tgtdev)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	int ret = 0;

	lockdep_assert_held(&srcdev->fs_info->chunk_mutex);

	while (!find_first_extent_bit(&srcdev->alloc_state, start,
				      &found_start, &found_end,
				      CHUNK_ALLOCATED, &cached_state)) {
		ret = set_extent_bits(&tgtdev->alloc_state, found_start,
				      found_end, CHUNK_ALLOCATED);
		if (ret)
			break;
		start = found_end + 1;
	}

	free_extent_state(cached_state);
	return ret;
}

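/*
 * Walk all chunk mappings and replace every stripe pointing at the source
 * device with the target device.
 */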
static void btrfs_dev_replace_update_device_in_mapping_tree(
						struct btrfs_fs_info *fs_info,
						struct btrfs_device *srcdev,
						struct btrfs_device *tgtdev)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 start = 0;
	int i;

	write_lock(&em_tree->lock);
	do {
		em = lookup_extent_mapping(em_tree, start, (u64)-1);
		if (!em)
			break;
		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++)
			if (srcdev == map->stripes[i].dev)
				map->stripes[i].dev = tgtdev;
		start = em->start + em->len;
		free_extent_map(em);
	} while (start);
	write_unlock(&em_tree->lock);
}

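/*
 * Final stage of a replace: flush dirty data, commit the transaction, swap
 * the source and target devices in the chunk mappings and the device list,
 * and hand the source device over for removal.
 */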
static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *tgt_device;
	struct btrfs_device *src_device;
	struct btrfs_root *root = fs_info->tree_root;
	u8 uuid_tmp[BTRFS_UUID_SIZE];
	struct btrfs_trans_handle *trans;
	int ret = 0;

	/* don't allow cancel or unmount to disturb the finishing procedure */
	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);

	down_read(&dev_replace->rwsem);
	/* was the operation canceled, or is it finished? */
	if (dev_replace->replace_state !=
	    BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) {
		up_read(&dev_replace->rwsem);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return 0;
	}

	tgt_device = dev_replace->tgtdev;
	src_device = dev_replace->srcdev;
	up_read(&dev_replace->rwsem);

	/*
	 * flush all outstanding I/O and inode extent mappings before the
	 * copy operation is declared as being finished
	 */
	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret) {
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return ret;
	}
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);

	if (!scrub_ret)
		btrfs_reada_remove_dev(src_device);

	/*
	 * We have to use this loop approach because at this point src_device
	 * has to be available for transaction commit to complete, yet new
	 * chunks shouldn't be allocated on the device.
	 */
	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			btrfs_reada_undo_remove_dev(src_device);
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		/* Prevent write_all_supers() during the finishing procedure */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		/* Prevent new chunks being allocated on the source device */
		mutex_lock(&fs_info->chunk_mutex);

		if (!list_empty(&src_device->post_commit_list)) {
			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			break;
		}
	}

	down_write(&dev_replace->rwsem);
	dev_replace->replace_state =
		scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
			  : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
	dev_replace->tgtdev = NULL;
	dev_replace->srcdev = NULL;
	dev_replace->time_stopped = ktime_get_real_seconds();
	dev_replace->item_needs_writeback = 1;

	/*
	 * Update allocation state in the new device and replace the old device
	 * with the new one in the mapping tree.
	 */
	if (!scrub_ret) {
		scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device);
		if (scrub_ret)
			goto error;
		btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
								src_device,
								tgt_device);
	} else {
		if (scrub_ret != -ECANCELED)
			btrfs_err_in_rcu(fs_info,
				 "btrfs_scrub_dev(%s, %llu, %s) failed %d",
				 btrfs_dev_name(src_device),
				 src_device->devid,
				 rcu_str_deref(tgt_device->name), scrub_ret);
error:
		up_write(&dev_replace->rwsem);
		mutex_unlock(&fs_info->chunk_mutex);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_reada_undo_remove_dev(src_device);
		btrfs_rm_dev_replace_blocked(fs_info);
		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		btrfs_rm_dev_replace_unblocked(fs_info);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

		return scrub_ret;
	}

	btrfs_info_in_rcu(fs_info,
			  "dev_replace from %s (devid %llu) to %s finished",
			  btrfs_dev_name(src_device),
			  src_device->devid,
			  rcu_str_deref(tgt_device->name));
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
	tgt_device->devid = src_device->devid;
	src_device->devid = BTRFS_DEV_REPLACE_DEVID;
	memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
	memcpy(tgt_device->uuid, src_device->uuid, sizeof(tgt_device->uuid));
	memcpy(src_device->uuid, uuid_tmp, sizeof(src_device->uuid));
	btrfs_device_set_total_bytes(tgt_device, src_device->total_bytes);
	btrfs_device_set_disk_total_bytes(tgt_device,
					  src_device->disk_total_bytes);
	btrfs_device_set_bytes_used(tgt_device, src_device->bytes_used);
	tgt_device->commit_bytes_used = src_device->bytes_used;

	btrfs_assign_next_active_device(src_device, tgt_device);

	list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
	fs_info->fs_devices->rw_devices++;

	up_write(&dev_replace->rwsem);
	btrfs_rm_dev_replace_blocked(fs_info);

	btrfs_rm_dev_replace_remove_srcdev(src_device);

	btrfs_rm_dev_replace_unblocked(fs_info);

	/*
	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
	 * update on-disk dev stats value during commit transaction
	 */
	atomic_inc(&tgt_device->dev_stats_ccnt);

	/*
	 * this is again a consistent state where no dev_replace procedure
	 * is running, the target device is part of the filesystem, the
	 * source device is not part of the filesystem anymore and its 1st
	 * superblock is scratched out so that it is no longer marked to
	 * belong to this filesystem.
	 */
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/* replace the sysfs entry */
	btrfs_sysfs_remove_device(src_device);
	btrfs_sysfs_update_devid(tgt_device);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state))
		btrfs_scratch_superblocks(fs_info, src_device->bdev,
					  src_device->name->str);

	/* write back the superblocks */
	trans = btrfs_start_transaction(root, 0);
	if (!IS_ERR(trans))
		btrfs_commit_transaction(trans);

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

	btrfs_rm_dev_replace_free_srcdev(src_device);

	return 0;
}

/*
 * Read progress of device replace status according to the state and last
 * stored position. The value format is the same as for
 * btrfs_dev_replace::progress_1000
 */
static u64 btrfs_dev_replace_progress(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 ret = 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
		ret = 1000;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ret = div64_u64(dev_replace->cursor_left,
				div_u64(btrfs_device_get_total_bytes(
						dev_replace->srcdev), 1000));
		break;
	}

	return ret;
}

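/* Fill in the status part of the dev-replace ioctl arguments. */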
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
			      struct btrfs_ioctl_dev_replace_args *args)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	/*
	 * Even if !dev_replace->is_valid, the values are good enough for
	 * the replace_status ioctl.
	 */
	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
	args->status.replace_state = dev_replace->replace_state;
	args->status.time_started = dev_replace->time_started;
	args->status.time_stopped = dev_replace->time_stopped;
	args->status.num_write_errors =
		atomic64_read(&dev_replace->num_write_errors);
	args->status.num_uncorrectable_read_errors =
		atomic64_read(&dev_replace->num_uncorrectable_read_errors);
	args->status.progress_1000 = btrfs_dev_replace_progress(fs_info);
	up_read(&dev_replace->rwsem);
}

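/*
 * Cancel a running or suspended replace.  A running replace is stopped by
 * canceling the scrub that drives it, a suspended one is cleaned up directly
 * since no scrub is running for it.
 */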
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = fs_info->tree_root;
	int result;
	int ret;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		up_write(&dev_replace->rwsem);
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		up_write(&dev_replace->rwsem);
		ret = btrfs_scrub_cancel(fs_info);
		if (ret < 0) {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		} else {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
			/*
			 * btrfs_dev_replace_finishing() will handle the
			 * cleanup part
			 */
			btrfs_info_in_rcu(fs_info,
				"dev_replace from %s (devid %llu) to %s canceled",
				btrfs_dev_name(src_device), src_device->devid,
				btrfs_dev_name(tgt_device));
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Scrub doing the replace isn't running so we need to do the
		 * cleanup step of btrfs_dev_replace_finishing() here
		 */
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		dev_replace->tgtdev = NULL;
		dev_replace->srcdev = NULL;
		dev_replace->replace_state =
				BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;

		up_write(&dev_replace->rwsem);

		/* Scrub for replace must not be running in suspended state */
		ret = btrfs_scrub_cancel(fs_info);
		ASSERT(ret != -ENOTCONN);

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		btrfs_info_in_rcu(fs_info,
		"suspended dev_replace from %s (devid %llu) to %s canceled",
			btrfs_dev_name(src_device), src_device->devid,
			btrfs_dev_name(tgt_device));

		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		break;
	default:
		up_write(&dev_replace->rwsem);
		result = -EINVAL;
	}

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
	return result;
}

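/*
 * Put a running replace into the suspended state so the filesystem can be
 * unmounted; it is resumed by btrfs_resume_dev_replace_async() on the next
 * mount.
 */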
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;
		btrfs_info(fs_info, "suspending dev_replace for unmount");
		break;
	}

	up_write(&dev_replace->rwsem);
	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
}

/* resume dev_replace procedure that was interrupted by unmount */
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		up_write(&dev_replace->rwsem);
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
		break;
	}
	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
		btrfs_info(fs_info,
			   "cannot continue dev_replace, tgtdev is missing");
		btrfs_info(fs_info,
			   "you may cancel the operation after 'mount -o degraded'");
		dev_replace->replace_state =
					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		return 0;
	}
	up_write(&dev_replace->rwsem);

	/*
	 * This could collide with a paused balance, but the exclusive op logic
	 * should never allow both to start and pause. We don't want to allow
	 * dev-replace to start anyway.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		btrfs_info(fs_info,
		"cannot resume dev-replace, other exclusive operation running");
		return 0;
	}

	task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
	return PTR_ERR_OR_ZERO(task);
}

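/*
 * Worker for a resumed replace: restart scrub from the committed cursor and
 * run the finishing procedure when it completes.
 */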
static int btrfs_dev_replace_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 progress;
	int ret;

	progress = btrfs_dev_replace_progress(fs_info);
	progress = div_u64(progress, 10);
	btrfs_info_in_rcu(fs_info,
		"continuing dev_replace from %s (devid %llu) to target %s @%u%%",
		btrfs_dev_name(dev_replace->srcdev),
		dev_replace->srcdev->devid,
		btrfs_dev_name(dev_replace->tgtdev),
		(unsigned int)progress);

	ret = btrfs_scrub_dev(fs_info, dev_replace->srcdev->devid,
			      dev_replace->committed_cursor_left,
			      btrfs_device_get_total_bytes(dev_replace->srcdev),
			      &dev_replace->scrub_progress, 0, 1);
	ret = btrfs_dev_replace_finishing(fs_info, ret);
	WARN_ON(ret && ret != -ECANCELED);

	btrfs_exclop_finish(fs_info);
	return 0;
}

int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
	if (!dev_replace->is_valid)
		return 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Return true even if tgtdev is missing: this can happen if
		 * the dev_replace procedure is suspended by an umount and
		 * then the tgtdev goes missing (or "btrfs dev scan" was not
		 * called) and the filesystem is mounted again in degraded
		 * state. This does not stop the dev_replace procedure, it
		 * needs to be canceled manually if that is wanted.
		 */
		break;
	}
	return 1;
}

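/*
 * The bio counter tracks bios in flight on the devices; the dev-replace
 * finishing code waits for it to drain before a device is removed.
 */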
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
{
	percpu_counter_inc(&fs_info->dev_replace.bio_counter);
}

void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
	percpu_counter_sub(&fs_info->dev_replace.bio_counter, amount);
	cond_wake_up_nomb(&fs_info->dev_replace.replace_wait);
}

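/*
 * Like btrfs_bio_counter_inc_noblocked(), but back off and wait while the
 * BTRFS_FS_STATE_DEV_REPLACING bit is set, i.e. while the replace code is
 * removing a device.
 */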
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
{
	while (1) {
		percpu_counter_inc(&fs_info->dev_replace.bio_counter);
		if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state)))
			break;

		btrfs_bio_counter_dec(fs_info);
		wait_event(fs_info->dev_replace.replace_wait,
			   !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state));
	}
}