// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity        = 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity        = 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
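 *
 * For example, a block group with the DATA and RAID1 flags is rendered as
 * "data|raid1".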
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)						\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a sufficiently
	 * large buffer.
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{

	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}


static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

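/* Return true if @path matches the RCU-protected name of @device. */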
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 *  Search and remove all stale (devices which are not mounted) devices.
 *  When both inputs are NULL, it will search and release all stale devices.
 *  path:	Optional. When provided, it will release all unmounted devices
 *		matching this path only.
 *  skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 *  Return:	0 for success or if @path is NULL.
 * 		-EBUSY if @path is a mounted device.
 * 		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}


static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases :
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *  are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in an
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(device->fs_info,
			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
					disk_super->fsid, devid,
					rcu_str_deref(device->name), path);
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
				"device fsid %pU devid %llu moved old:%s new:%s",
				disk_super->fsid, devid,
				rcu_str_deref(device->name), path);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

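/*
 * Create an unopened copy of @orig with the same fsid and a duplicate of its
 * device list (devids, uuids and names only).
 */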
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      int step, struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, step, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, step, &latest_dev);

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

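/* Release a device's resources and reset its state while closing fs_devices. */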
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/* Verify the device is back in a pristine state  */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);
	mutex_unlock(&uuid_mutex);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;

	return 0;
}

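/* list_sort() comparator: order devices by ascending devid. */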
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like bd_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

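/*
 * Read the superblock copy at @bytenr from @bdev through the page cache and do
 * basic bytenr/magic validation.  The caller must release the result with
 * btrfs_release_disk_super().
 */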
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	disk_super = btrfs_read_disk_super(bdev, bytenr);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	default:
		BUG();
	}
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_end to reflect the suitable
 * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	/*
	 * Check before we set max_hole_start, otherwise we could end up
	 * sending back this offset anyway.
	 */
	if (contains_pending_extent(device, hole_start, *hole_size)) {
		if (hole_end >= *hole_start)
			*hole_size = hole_end - *hole_start;
		else
			*hole_size = 0;
		changed = true;
	}

	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/* No extra check */
		break;
	default:
		BUG();
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

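/*
 * Remove the dev extent item covering @start on @device and return the
 * extent's length in @dev_extent_len.
 */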
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

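/*
 * Insert a dev extent item mapping [start, start + num_bytes) on @device to
 * the chunk at @chunk_offset.
 */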
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1722 1723
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

1724 1725
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
1726
out:
1727 1728 1729 1730
	btrfs_free_path(path);
	return ret;
}

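/*
 * Return the logical offset right past the last chunk mapping in the
 * in-memory mapping tree, i.e. the next free chunk offset, or 0 if the
 * tree is empty.
 */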
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

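/*
 * Find the next free device id: one past the highest devid present in the
 * chunk tree, or 1 if no device item exists yet.
 */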
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
1752 1753 1754 1755
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
1761 1762 1763 1764 1765

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

1766
	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1767 1768 1769
	if (ret < 0)
		goto error;

1770 1771 1772 1773 1774 1775
	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}
1776

1777 1778
	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
1779 1780
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
1781
		*devid_ret = 1;
1782 1783 1784
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
1785
		*devid_ret = found_key.offset + 1;
1786 1787 1788
	}
	ret = 0;
error:
	btrfs_free_path(path);
1790 1791 1792 1793 1794 1795 1796
	return ret;
}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1798
			    struct btrfs_device *device)
1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
1814

1815 1816
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
1817 1818 1819 1820 1821 1822 1823
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
1825 1826 1827 1828
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1829 1830 1831 1832
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
1833 1834 1835
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1836
	btrfs_set_device_start_offset(leaf, dev_item, 0);
1837

1838
	ptr = btrfs_device_uuid(dev_item);
1839
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1840
	ptr = btrfs_device_fsid(dev_item);
1841 1842
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
1843 1844
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
1846 1847 1848 1849
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used by ctime/mtime based probes such as libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

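/*
 * Delete the device item of @device from the chunk tree in its own
 * transaction and commit it.
 */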
static int btrfs_rm_dev_item(struct btrfs_device *device)
1867
{
1868
	struct btrfs_root *root = device->fs_info->chunk_root;
1869 1870 1871 1872 1873 1874 1875 1876 1877
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

1878
	trans = btrfs_start_transaction(root, 0);
1879 1880 1881 1882
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
1883 1884 1885 1886 1887
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1888 1889 1890 1891 1892
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
1893 1894 1895 1896
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
1897 1898 1899 1900 1901
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

1902 1903
out:
	btrfs_free_path(path);
1904 1905
	if (!ret)
		ret = btrfs_commit_transaction(trans);
1906 1907 1908
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
1916 1917
{
	u64 all_avail;
1918
	unsigned seq;
1919
	int i;
1920

1921
	do {
1922
		seq = read_seqbegin(&fs_info->profiles_lock);
1923

1924 1925 1926 1927
		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));
1928

1929
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1930
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1931
			continue;
1932

1933
		if (num_devices < btrfs_raid_array[i].devs_min) {
1934
			int ret = btrfs_raid_array[i].mindev_error;
1935

1936 1937 1938
			if (ret)
				return ret;
		}
	}

1941
	return 0;
1942 1943
}

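/*
 * Return any device in @fs_devs other than @device that is open and not
 * marked missing, or NULL if there is none.
 */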
static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1946
{
	struct btrfs_device *next_device;
1948 1949 1950

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
1951 1952
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1966
				     struct btrfs_device *this_dev)
1967
{
1968
	struct btrfs_fs_info *fs_info = device->fs_info;
1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}

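/*
 * Wipe the magic of every superblock copy on @bdev so the device is no
 * longer recognized as a btrfs device, then notify udev and update the
 * timestamps on @device_path for blkid.
 */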
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path)
{
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		struct page *page;
		int ret;

		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
		if (IS_ERR(disk_super))
			continue;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));

		page = virt_to_page(disk_super);
		set_page_dirty(page);
		lock_page(page);
		/* write_one_page() unlocks the page */
		ret = write_one_page(page);
		if (ret)
			btrfs_warn(fs_info,
				"error clearing superblock number %d (%d)",
				copy_num, ret);
		btrfs_release_disk_super(disk_super);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

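/*
 * Remove a device from the filesystem: the device is looked up by @devid or
 * @device_path, shrunk to zero, its device item is deleted, it is dropped
 * from the in-memory device lists and finally its superblocks are wiped.
 */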
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
2046 2047
{
	struct btrfs_device *device;
2048
	struct btrfs_fs_devices *cur_devices;
2049
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
2051 2052 2053 2054
	int ret = 0;

	mutex_lock(&uuid_mutex);

2055
	num_devices = btrfs_num_devices(fs_info);
2056

2057
	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2058
	if (ret)
2059 2060
		goto out;

2061 2062 2063 2064 2065 2066 2067 2068
	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
2070
	}
2071

2072 2073 2074 2075 2076 2077 2078 2079
	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
		  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

2080
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2081
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2082
		goto out;
2083 2084
	}

2085 2086
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
2087
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2088
		goto out;
	}

2091
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2092
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
2094
		device->fs_devices->rw_devices--;
2095
		mutex_unlock(&fs_info->chunk_mutex);
2096
	}
2097

2098
	mutex_unlock(&uuid_mutex);
2099
	ret = btrfs_shrink_device(device, 0);
2100
	mutex_lock(&uuid_mutex);
2101
	if (ret)
2102
		goto error_undo;
2103

2104 2105 2106 2107 2108
	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
2109
	ret = btrfs_rm_dev_item(device);
2110
	if (ret)
2111
		goto error_undo;
2112

2113
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2114
	btrfs_scrub_cancel_dev(device);

	/*
	 * The device list mutex makes sure that we don't change the device
	 * list while someone else is writing out all the device supers.
	 * Whoever is writing all supers should lock the device list mutex
	 * before getting the number of devices in the super block
	 * (super_copy). Conversely, whoever updates the number of devices in
	 * the super block (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases cur_devices == fs_devices. But in case of deleting
	 * a seed device, cur_devices should point to its own fs_devices
	 * listed under fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
2132
	mutex_lock(&fs_devices->device_list_mutex);
2133
	list_del_rcu(&device->dev_list);
2134

2135 2136
	cur_devices->num_devices--;
	cur_devices->total_devices--;
2137 2138 2139
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

2141
	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2142
		cur_devices->missing_devices--;
2143

2144
	btrfs_assign_next_active_device(device, NULL);

2146
	if (device->bdev) {
2147
		cur_devices->open_devices--;
2148
		/* remove sysfs entry */
2149
		btrfs_sysfs_remove_devices_dir(fs_devices, device);
2150
	}
2151

2152 2153
	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2154
	mutex_unlock(&fs_devices->device_list_mutex);

2156 2157 2158 2159 2160
	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
2161
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2162 2163
		btrfs_scratch_superblocks(fs_info, device->bdev,
					  device->name->str);
2164 2165

	btrfs_close_bdev(device);
2166 2167
	synchronize_rcu();
	btrfs_free_device(device);
2168

2169
	if (cur_devices->open_devices == 0) {
2170
		list_del_init(&cur_devices->seed_list);
2171
		close_fs_devices(cur_devices);
2172
		free_fs_devices(cur_devices);
	}

2175 2176 2177
out:
	mutex_unlock(&uuid_mutex);
	return ret;
2178

2179
error_undo:
2180
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2181
		mutex_lock(&fs_info->chunk_mutex);
2182
		list_add(&device->dev_alloc_list,
2183
			 &fs_devices->alloc_list);
2184
		device->fs_devices->rw_devices++;
2185
		mutex_unlock(&fs_info->chunk_mutex);
2186
	}
2187
	goto out;
2188 2189
}

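/*
 * Unlink the replace source device from its fs_devices lists and adjust the
 * device counters. Caller must hold the device_list_mutex.
 */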
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2191
{
2192 2193
	struct btrfs_fs_devices *fs_devices;

2194
	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2195

2196 2197 2198 2199 2200 2201 2202
	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;
2203

2204
	list_del_rcu(&srcdev->dev_list);
2205
	list_del(&srcdev->dev_alloc_list);
2206
	fs_devices->num_devices--;
2207
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2208
		fs_devices->missing_devices--;
2209

2210
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2211
		fs_devices->rw_devices--;
2212

2213
	if (srcdev->bdev)
2214
		fs_devices->open_devices--;
2215 2216
}

2217
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2218
{
2219
	struct btrfs_fs_info *fs_info = srcdev->fs_info;
2220
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2221

2222 2223
	mutex_lock(&uuid_mutex);

2224
	btrfs_close_bdev(srcdev);
2225 2226
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* If there are no devices left, delete the fs_devices as well. */
	if (!fs_devices->num_devices) {
		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no more
		 * devices left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

2238
		list_del_init(&fs_devices->seed_list);
2239
		close_fs_devices(fs_devices);
2240
		free_fs_devices(fs_devices);
2241
	}
2242
	mutex_unlock(&uuid_mutex);
2243 2244
}

2245
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2246
{
2247
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2248 2249

	mutex_lock(&fs_devices->device_list_mutex);
2250

2251
	btrfs_sysfs_remove_devices_dir(fs_devices, tgtdev);
2252

2253
	if (tgtdev->bdev)
2254
		fs_devices->open_devices--;
2255

2256
	fs_devices->num_devices--;
2257

2258
	btrfs_assign_next_active_device(tgtdev, NULL);
2259 2260 2261

	list_del_rcu(&tgtdev->dev_list);

2262
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks() may lead
	 * to a call to btrfs_show_devname() which will try to hold
	 * device_list_mutex. At this point the device is already out of the
	 * device list, so we don't have to hold the device_list_mutex lock.
	 */
2271 2272
	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
				  tgtdev->name->str);
2273 2274

	btrfs_close_bdev(tgtdev);
2275 2276
	synchronize_rcu();
	btrfs_free_device(tgtdev);
2277 2278
}

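/*
 * Read the superblock from @device_path and look up the matching
 * btrfs_device among the devices known to @fs_info.
 */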
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
2281 2282 2283 2284 2285 2286
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
2287
	struct btrfs_device *device;
2288 2289

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2290
				    fs_info->bdev_holder, 0, &bdev, &disk_super);
2291
	if (ret)
2292
		return ERR_PTR(ret);
2293

2294 2295
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
2296
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2297
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2298
					   disk_super->metadata_uuid, true);
2299
	else
2300
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2301
					   disk_super->fsid, true);
2302

2303
	btrfs_release_disk_super(disk_super);
2304 2305
	if (!device)
		device = ERR_PTR(-ENOENT);
2306
	blkdev_put(bdev, FMODE_READ);
2307
	return device;
2308 2309
}

2310 2311 2312
/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
2313
struct btrfs_device *btrfs_find_device_by_devspec(
2314 2315
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
2316
{
2317
	struct btrfs_device *device;
2318

2319
	if (devid) {
2320
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2321
					   NULL, true);
2322 2323
		if (!device)
			return ERR_PTR(-ENOENT);
2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
2337
		}
2338
		return ERR_PTR(-ENOENT);
2339
	}
2340 2341

	return btrfs_find_device_by_path(fs_info, device_path);
2342 2343
}

/*
 * Does all the dirty work required for changing the filesystem's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
2349
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
2352
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

2356
	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return -EINVAL;

2360 2361 2362 2363
	/*
	 * Private copy of the seed devices, anchored at
	 * fs_info->fs_devices->seed_list
	 */
2364
	seed_devices = alloc_fs_devices(NULL, NULL);
2365 2366
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	/*
	 * It's necessary to retain a copy of the original seed fs_devices in
	 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * multiple seed filesystems.
	 */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

2380
	list_add(&old_devices->fs_list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
2386
	mutex_init(&seed_devices->device_list_mutex);
2387

2388
	mutex_lock(&fs_devices->device_list_mutex);
2389 2390
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;
2393

2394
	mutex_lock(&fs_info->chunk_mutex);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2396
	mutex_unlock(&fs_info->chunk_mutex);

2398
	fs_devices->seeding = false;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
2401
	fs_devices->missing_devices = 0;
2402
	fs_devices->rotating = false;
2403
	list_add(&seed_devices->seed_list, &fs_devices->seed_list);

	generate_random_uuid(fs_devices->fsid);
2406
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2408
	mutex_unlock(&fs_devices->device_list_mutex);
2409

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
2422
	struct btrfs_fs_info *fs_info = trans->fs_info;
2423
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
2429
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2457
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
2469
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
2471
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2472
				   BTRFS_FSID_SIZE);
2473
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2474
					   fs_uuid, true);
2475
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

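/*
 * Add the device at @device_path to a mounted filesystem, sprouting a new
 * writable filesystem first if the mounted one is a seed filesystem.
 */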
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2493
{
2494
	struct btrfs_root *root = fs_info->dev_root;
2495
	struct request_queue *q;
2496 2497 2498
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
2499
	struct super_block *sb = fs_info->sb;
2500
	struct rcu_string *name;
2501
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2502 2503
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int seeding_dev = 0;
2505
	int ret = 0;
2506
	bool unlocked = false;
2507

2508
	if (sb_rdonly(sb) && !fs_devices->seeding)
2509
		return -EROFS;
2510

2511
	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2512
				  fs_info->bdev_holder);
2513 2514
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
2515

2516
	if (fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

2522
	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2523

2524
	mutex_lock(&fs_devices->device_list_mutex);
2525
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2526 2527
		if (device->bdev == bdev) {
			ret = -EEXIST;
2528
			mutex_unlock(
2529
				&fs_devices->device_list_mutex);
			goto error;
2531 2532
		}
	}
2533
	mutex_unlock(&fs_devices->device_list_mutex);
2534

2535
	device = btrfs_alloc_device(fs_info, NULL, NULL);
2536
	if (IS_ERR(device)) {
2537
		/* we can safely leave the fs_devices entry around */
2538
		ret = PTR_ERR(device);
		goto error;
2540 2541
	}

2542
	name = rcu_string_strdup(device_path, GFP_KERNEL);
2543
	if (!name) {
		ret = -ENOMEM;
2545
		goto error_free_device;
2546
	}
2547
	rcu_assign_pointer(device->name, name);

2549
	trans = btrfs_start_transaction(root, 0);
2550 2551
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
2552
		goto error_free_device;
2553 2554
	}

2555
	q = bdev_get_queue(bdev);
2556
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
2558 2559 2560
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
2561 2562
	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
					 fs_info->sectorsize);
2563
	device->disk_total_bytes = device->total_bytes;
2564
	device->commit_total_bytes = device->total_bytes;
2565
	device->fs_info = fs_info;
2566
	device->bdev = bdev;
2567
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2568
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2569
	device->mode = FMODE_EXCL;
2570
	device->dev_stats_valid = 1;
2571
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2572

	if (seeding_dev) {
2574
		sb->s_flags &= ~SB_RDONLY;
2575
		ret = btrfs_prepare_sprout(fs_info);
2576 2577 2578 2579
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}
2581

2582
	device->fs_devices = fs_devices;
2583

2584
	mutex_lock(&fs_devices->device_list_mutex);
2585
	mutex_lock(&fs_info->chunk_mutex);
2586 2587 2588 2589 2590 2591 2592
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;
2593

2594
	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2595

2596
	if (!blk_queue_nonrot(q))
2597
		fs_devices->rotating = true;

2599
	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2600
	btrfs_set_super_total_bytes(fs_info->super_copy,
2601 2602
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));
2603

2604 2605 2606
	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);
2607 2608

	/* add sysfs device entry */
2609
	btrfs_sysfs_add_devices_dir(fs_devices, device);
2610

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
2615
	btrfs_clear_space_info_full(fs_info);

2617
	mutex_unlock(&fs_info->chunk_mutex);
2618
	mutex_unlock(&fs_devices->device_list_mutex);
2619

	if (seeding_dev) {
2621
		mutex_lock(&fs_info->chunk_mutex);
2622
		ret = init_first_rw_device(trans);
2623
		mutex_unlock(&fs_info->chunk_mutex);
2624
		if (ret) {
2625
			btrfs_abort_transaction(trans, ret);
2626
			goto error_sysfs;
2627
		}
	}

2630
	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
2632
		btrfs_abort_transaction(trans, ret);
2633
		goto error_sysfs;
	}

	if (seeding_dev) {
2637
		ret = btrfs_finish_sprout(trans);
2638
		if (ret) {
2639
			btrfs_abort_transaction(trans, ret);
2640
			goto error_sysfs;
2641
		}
2642

2643 2644 2645 2646 2647
		/*
		 * fs_devices now represents the newly sprouted filesystem and
		 * its fsid has been changed by btrfs_prepare_sprout
		 */
		btrfs_sysfs_update_sprout_fsid(fs_devices);
	}

2650
	ret = btrfs_commit_transaction(trans);
2651

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
2655
		unlocked = true;
2656

2657 2658 2659
		if (ret) /* transaction commit */
			return ret;

2660
		ret = btrfs_relocate_sys_chunks(fs_info);
2661
		if (ret < 0)
2662
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2664 2665 2666 2667
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
2668 2669 2670
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
2671
		}
2672
		ret = btrfs_commit_transaction(trans);
	}
2674

	/*
	 * Now that we have written a new super block to this device, check all
	 * other fs_devices lists to see if device_path alienates any other
	 * scanned device.
	 * We can ignore the return value as it typically returns -EINVAL and
	 * only succeeds if the device was an alien.
	 */
	btrfs_forget_devices(device_path);

	/* Update ctime/mtime for blkid or udev */
2685
	update_dev_time(device_path);
2686

	return ret;
2688

2689
error_sysfs:
2690
	btrfs_sysfs_remove_devices_dir(fs_devices, device);
2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2707
error_trans:
2708
	if (seeding_dev)
2709
		sb->s_flags |= SB_RDONLY;
2710 2711
	if (trans)
		btrfs_end_transaction(trans);
2712
error_free_device:
2713
	btrfs_free_device(device);
error:
2715
	blkdev_put(bdev, FMODE_EXCL);
2716
	if (seeding_dev && !unlocked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
2720
	return ret;
2721 2722
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
2725 2726 2727
{
	int ret;
	struct btrfs_path *path;
2728
	struct btrfs_root *root = device->fs_info->chunk_root;
2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2758 2759 2760 2761
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
2762 2763 2764 2765 2766 2767 2768
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

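/*
 * Grow @device to @new_size (rounded down to the sector size), updating the
 * superblock total and the device item. Not allowed on a replace target.
 */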
int btrfs_grow_device(struct btrfs_trans_handle *trans,
2770 2771
		      struct btrfs_device *device, u64 new_size)
{
2772 2773
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total;
	u64 diff;
2776

2777
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

2780 2781
	new_size = round_down(new_size, fs_info->sectorsize);

2782
	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
2784
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

2786
	if (new_size <= device->total_bytes ||
2787
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2788
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

2792 2793
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;

2796 2797
	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
2798
	btrfs_clear_space_info_full(device->fs_info);
2799 2800 2801
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
2802
	mutex_unlock(&fs_info->chunk_mutex);
2803

2804 2805 2806
	return btrfs_update_device(trans, device);
}

2807
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2808
{
2809
	struct btrfs_fs_info *fs_info = trans->fs_info;
2810
	struct btrfs_root *root = fs_info->chunk_root;
2811 2812 2813 2814 2815 2816 2817 2818
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

2819
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2820 2821 2822 2823
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2824 2825 2826
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
2827 2828
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
2829 2830 2831
		ret = -ENOENT;
		goto out;
	}
2832 2833

	ret = btrfs_del_item(trans, root, path);
2834
	if (ret < 0)
2835 2836
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
2837
out:
2838
	btrfs_free_path(path);
2839
	return ret;
2840 2841
}

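/*
 * Remove the copy of the chunk item at @chunk_offset from the
 * sys_chunk_array embedded in the in-memory super block.
 */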
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2843
{
2844
	struct btrfs_super_block *super_copy = fs_info->super_copy;
2845 2846 2847 2848 2849 2850 2851 2852 2853 2854
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

2855
	mutex_lock(&fs_info->chunk_mutex);
2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
2875
		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2876 2877 2878 2879 2880 2881 2882 2883 2884
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
2885
	mutex_unlock(&fs_info->chunk_mutex);
2886 2887 2888
	return ret;
}

2889 2890 2891 2892 2893 2894 2895 2896 2897
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
2898 2899 2900 2901
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;

2902
	em_tree = &fs_info->mapping_tree;
2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* callers are responsible for dropping em's ref. */
	return em;
}

2925
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2926
{
2927
	struct btrfs_fs_info *fs_info = trans->fs_info;
2928 2929
	struct extent_map *em;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
2931
	int i, ret = 0;
2932
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2933

2934
	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2935
	if (IS_ERR(em)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
2942
		return PTR_ERR(em);
2943
	}
2944
	map = em->map_lookup;
2945
	mutex_lock(&fs_info->chunk_mutex);
2946
	check_system_chunk(trans, map->type);
2947
	mutex_unlock(&fs_info->chunk_mutex);
2948

2949 2950 2951 2952 2953 2954
	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
2955
	for (i = 0; i < map->num_stripes; i++) {
2956
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
2960
		if (ret) {
2961
			mutex_unlock(&fs_devices->device_list_mutex);
2962
			btrfs_abort_transaction(trans, ret);
2963 2964
			goto out;
		}
2965

		if (device->bytes_used > 0) {
2967
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
2970
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2971
			btrfs_clear_space_info_full(fs_info);
2972
			mutex_unlock(&fs_info->chunk_mutex);
		}
2974

2975 2976 2977 2978 2979
		ret = btrfs_update_device(trans, device);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
2980
		}
2981
	}
2982 2983
	mutex_unlock(&fs_devices->device_list_mutex);

2984
	ret = btrfs_free_chunk(trans, chunk_offset);
2985
	if (ret) {
2986
		btrfs_abort_transaction(trans, ret);
2987 2988
		goto out;
	}
2989

2990
	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2991

2992
	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2993
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2994
		if (ret) {
2995
			btrfs_abort_transaction(trans, ret);
2996 2997
			goto out;
		}
2998 2999
	}

3000
	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3001
	if (ret) {
3002
		btrfs_abort_transaction(trans, ret);
3003 3004
		goto out;
	}

3006
out:
	/* once for us */
	free_extent_map(em);
3009 3010
	return ret;
}
Y
3012
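/*
 * Relocate all extents of the chunk at @chunk_offset and then delete the
 * now empty chunk, including its block group and dev extents.
 */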
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3013
{
3014
	struct btrfs_root *root = fs_info->chunk_root;
3015
	struct btrfs_trans_handle *trans;
3016
	struct btrfs_block_group *block_group;
3017
	int ret;

3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030
	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
3031
	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3032

3033
	/* step one, relocate all the extents inside this chunk */
3034
	btrfs_scrub_pause(fs_info);
3035
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3036
	btrfs_scrub_continue(fs_info);
3037 3038 3039
	if (ret)
		return ret;

3040 3041 3042 3043 3044 3045
	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
	if (!block_group)
		return -ENOENT;
	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
	btrfs_put_block_group(block_group);

3046 3047 3048 3049 3050 3051 3052 3053
	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

3054
	/*
3055 3056
	 * step two, delete the device extents and the
	 * chunk tree entries
3057
	 */
3058
	ret = btrfs_remove_chunk(trans, chunk_offset);
3059
	btrfs_end_transaction(trans);
3060
	return ret;
}

3063
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
3065
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
3072 3073
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

3080
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
3086
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3088
		if (ret < 0) {
3089
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
3091
		}
3092
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
3096
		if (ret)
3097
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
3109
		btrfs_release_path(path);
3110

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3112
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3113 3114
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
3118
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3119

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
3125 3126 3127 3128
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
3129
	} else if (WARN_ON(failed && retried)) {
3130 3131
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
3135 3136
}

/*
 * Return 1 if a data chunk was allocated successfully,
 * return <0 on errors while allocating a data chunk,
 * return 0 if there is no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
3145
	struct btrfs_block_group *cache;
3146 3147 3148 3149 3150 3151 3152 3153
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173
	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	spin_lock(&fs_info->data_sinfo->lock);
	bytes_used = fs_info->data_sinfo->bytes_used;
	spin_unlock(&fs_info->data_sinfo->lock);

	if (!bytes_used) {
		struct btrfs_trans_handle *trans;
		int ret;

		trans =	btrfs_join_transaction(fs_info->tree_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
		btrfs_end_transaction(trans);
		if (ret < 0)
			return ret;
		return 1;
3174
	}
3175

3176 3177 3178
	return 0;
}

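/*
 * Write the current balance control arguments as a balance item in the
 * tree root and commit the transaction.
 */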
static int insert_balance_item(struct btrfs_fs_info *fs_info,
3180 3181
			       struct btrfs_balance_control *bctl)
{
3182
	struct btrfs_root *root = fs_info->tree_root;
3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
3202
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3203 3204 3205 3206 3207 3208 3209 3210 3211 3212
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

3213
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
3227
	err = btrfs_commit_transaction(trans);
3228 3229 3230 3231 3232
	if (err && !ret)
		ret = err;
	return ret;
}

3233
static int del_balance_item(struct btrfs_fs_info *fs_info)
3234
{
3235
	struct btrfs_root *root = fs_info->tree_root;
3236 3237 3238 3239 3240 3241 3242 3243 3244
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

3245
	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3246 3247 3248 3249 3250 3251
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
3252
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
3266
	err = btrfs_commit_transaction(trans);
3267 3268 3269 3270 3271
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3296
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3302
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3308
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

3315 3316 3317 3318
/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
3319 3320
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3321
	int ret;
3322 3323 3324 3325 3326 3327 3328 3329

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
3330 3331 3332
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
3333 3334
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
3339
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
3342 3343
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

3345
	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

3351
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
3353
{
3354
	struct btrfs_block_group *cache;
3355 3356 3357 3358 3359 3360
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3361
	chunk_used = cache->used;
3362 3363 3364 3365

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
3366 3367
		user_thresh_min = div_factor_fine(cache->length,
						  bargs->usage_min);
3368 3369 3370 3371

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
3372
		user_thresh_max = cache->length;
3373
	else
3374 3375
		user_thresh_max = div_factor_fine(cache->length,
						  bargs->usage_max);
3376 3377 3378 3379 3380 3381 3382 3383

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

3384
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3385
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{
3387
	struct btrfs_block_group *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3392
	chunk_used = cache->used;

3394
	if (bargs->usage_min == 0)
3395
		user_thresh = 1;
3396
	else if (bargs->usage > 100)
3397
		user_thresh = cache->length;
3398
	else
3399
		user_thresh = div_factor_fine(cache->length, bargs->usage);
3400

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

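/*
 * Number of stripes that carry data for a chunk of the given @type: parity
 * stripes are subtracted for parity profiles, otherwise the stripe count is
 * divided by the number of copies.
 */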
static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;

	if (nparity)
		return num_stripes - nparity;
	else
		return num_stripes / ncopies;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
3446
	u64 type;
I
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

3453 3454
	type = btrfs_chunk_type(leaf, chunk);
	factor = calc_data_stripes(type, num_stripes);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

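/* Stripes range filter: balance the chunk only when its stripe count lies in [stripes_min, stripes_max]. */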
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

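/*
 * Apply all configured balance filters to one chunk. Returns 1 when the
 * chunk should be relocated and 0 when it is filtered out.
 */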
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

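/*
 * Walk the chunk tree backwards and relocate every chunk that passes the
 * balance filters. The first pass only counts matching chunks, the second
 * pass does the actual relocation.
 */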
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/* The single value limit and min/max limits use the same bytes in the args */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the args, so restore the values saved above.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(leaf, chunk, found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so let's allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
	   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	return has_single_bit_set(flags);
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

/*
 * Validate target profile against allowed profiles and return true if it's OK.
 * Otherwise print the error message and return false.
 */
static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
		const struct btrfs_balance_args *bargs,
		u64 allowed, const char *type)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return true;

	/* Profile is valid and does not have bits outside of the allowed set */
	if (alloc_profile_is_valid(bargs->target, 1) &&
	    (bargs->target & ~allowed) == 0)
		return true;

	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
			type, btrfs_bg_type_to_raid_name(bargs->target));
	return false;
}

/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				 u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;
	char *bp = buf;
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
		CHECK_APPEND_1ARG("convert=%s,",
				  btrfs_bg_type_to_raid_name(bargs->target));

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
	else
		buf[0] = '\0';
}

static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}

/*
 * Should be called with balance mutex held
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_redundancy;
	int i;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    btrfs_should_cancel_balance(fs_info)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	  "balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * rw_devices will not change at the moment, device add/delete/replace
	 * are excluded by EXCL_OP
	 */
	num_devices = fs_info->fs_devices->rw_devices;

	/*
	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
	 * special bit for it, to make it easier to distinguish.  Thus we need
	 * to set it manually, or balance would refuse the profile.
	 */
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
		if (num_devices >= btrfs_raid_array[i].devs_min)
			allowed |= btrfs_raid_array[i].bg_flag;

	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow to reduce metadata or system integrity only if force set for
	 * profiles with redundancy (copies, parity)
	 */
	allowed = 0;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
		if (btrfs_raid_array[i].ncopies >= 2 ||
		    btrfs_raid_array[i].tolerated_failures >= 1)
			allowed |= btrfs_raid_array[i].bg_flag;
	}
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed)))
			reducing_redundancy = true;
		else
			reducing_redundancy = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_redundancy) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
			   "balance: force reducing metadata redundancy");
		} else {
			btrfs_err(fs_info,
	"balance: reduces metadata redundancy, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
				btrfs_bg_type_to_raid_name(meta_target),
				btrfs_bg_type_to_raid_name(data_target));
	}

	if (fs_info->send_in_progress) {
		btrfs_warn_rl(fs_info,
"cannot run balance while send operations are in progress (%d in progress)",
			      fs_info->send_in_progress);
		ret = -EAGAIN;
		goto out;
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	/*
	 * Balance can be canceled by:
	 *
	 * - Regular cancel request
	 *   Then ret == -ECANCELED and balance_cancel_req > 0
	 *
	 * - Fatal signal to "btrfs" process
	 *   Either the signal caught by wait_reserve_ticket() and callers
	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
	 *   got -ECANCELED.
	 *   Either way, in this case balance_cancel_req = 0, and
	 *   ret == -EINTR or ret == -ECANCELED.
	 *
	 * So here we only check the return value to catch canceled balance.
	 */
	else if (ret == -ECANCELED || ret == -EINTR)
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		btrfs_update_ioctl_balance_args(fs_info, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		reset_balance_state(fs_info);
		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		reset_balance_state(fs_info);
	else
		kfree(bctl);
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);

	return ret;
}

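/* Kthread entry point used to resume a paused balance in the background. */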
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl)
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
	mutex_unlock(&fs_info->balance_mutex);

	return ret;
}

int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl) {
			reset_balance_state(fs_info);
			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	BUG_ON(fs_info->balance_ctl ||
		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

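/*
 * Background scan of all root items that (re)populates the UUID tree with
 * subvolume and received-subvolume UUID entries.
 */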
int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;
	bool closing = false;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
		if (btrfs_fs_closing(fs_info)) {
			closing = true;
			break;
		}
		ret = btrfs_search_forward(root, &key, path,
				BTRFS_OLDEST_GENERATION);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		btrfs_release_path(path);
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

skip:
		btrfs_release_path(path);
		if (trans) {
			ret = btrfs_end_transaction(trans);
			trans = NULL;
			if (ret)
				break;
		}

		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else if (!closing)
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so let's allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	/* Clear all state bits beyond the shrunk device size */
	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
			  CHUNK_STATE_MASK);

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

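/*
 * Append a copy of a SYSTEM chunk item to the superblock's sys_chunk_array
 * so the chunk can be found before the chunk tree is readable at mount time.
 */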
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
		return;

	btrfs_set_fs_incompat(info, RAID1C34);
}

/*
 * Structure used internally for __btrfs_alloc_chunk() function.
 * Wraps needed parameters.
 */
struct alloc_chunk_ctl {
	u64 start;
	u64 type;
	/* Total number of stripes to allocate */
	int num_stripes;
	/* sub_stripes info for map */
	int sub_stripes;
	/* Stripes per device */
	int dev_stripes;
	/* Maximum number of devices to use */
	int devs_max;
	/* Minimum number of devices to use */
	int devs_min;
	/* ndevs has to be a multiple of this */
	int devs_increment;
	/* Number of copies */
	int ncopies;
	/* Number of stripes worth of bytes to store parity information */
	int nparity;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 dev_extent_min;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
};

static void init_alloc_chunk_ctl_policy_regular(
				struct btrfs_fs_devices *fs_devices,
				struct alloc_chunk_ctl *ctl)
{
	u64 type = ctl->type;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		ctl->max_stripe_size = SZ_1G;
		ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* For larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			ctl->max_stripe_size = SZ_1G;
		else
			ctl->max_stripe_size = SZ_256M;
		ctl->max_chunk_size = ctl->max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ctl->max_stripe_size = SZ_32M;
		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
		ctl->devs_max = min_t(int, ctl->devs_max,
				      BTRFS_MAX_DEVS_SYS_CHUNK);
	} else {
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
				  ctl->max_chunk_size);
	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
}

static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
				 struct alloc_chunk_ctl *ctl)
{
	int index = btrfs_bg_flags_to_raid_index(ctl->type);

	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
	ctl->devs_max = btrfs_raid_array[index].devs_max;
	if (!ctl->devs_max)
		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
	ctl->devs_min = btrfs_raid_array[index].devs_min;
	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
	ctl->ncopies = btrfs_raid_array[index].ncopies;
	ctl->nparity = btrfs_raid_array[index].nparity;
	ctl->ndevs = 0;

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
		break;
	default:
		BUG();
	}
}

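/*
 * Gather free-space information for every writable device and sort the
 * results so the largest holes come first for the stripe size decision.
 */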
static int gather_device_info(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;
	struct btrfs_device *device;
	u64 total_avail;
	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
	int ret;
	int ndevs = 0;
	u64 max_avail;
	u64 dev_offset;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail < ctl->dev_extent_min)
			continue;

		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
					   &max_avail);
		if (ret && ret != -ENOSPC)
			return ret;

		if (ret == 0)
			max_avail = dev_extent_want;

		if (max_avail < ctl->dev_extent_min) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%llu",
					    __func__, device->devid, max_avail,
					    ctl->dev_extent_min);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}
	ctl->ndevs = ndevs;

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	return 0;
}

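/*
 * Pick stripe_size and num_stripes from the sorted device info, shrinking
 * the stripe size if the resulting chunk would exceed max_chunk_size.
 */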
static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
				      struct btrfs_device_info *devices_info)
{
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
				   ctl->dev_stripes);
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;

	/* This will have to be fixed for RAID1 and RAID10 over more drives */
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk is
	 * really going to be in terms of logical address space, and compare
	 * that answer with the max chunk size. If it's higher, we try to
	 * reduce stripe_size.
	 */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
							data_stripes), SZ_16M),
				       ctl->stripe_size);
	}

	/* Align to BTRFS_STRIPE_LEN */
	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}

static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;

	/*
	 * Round down to number of usable stripes, devs_increment can be any
	 * number so we can't use round_down() that requires power of 2, while
	 * rounddown is safe.
	 */
	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);

	if (ctl->ndevs < ctl->devs_min) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ctl->ndevs, ctl->devs_min);
		}
		return -ENOSPC;
	}

	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return decide_stripe_size_regular(ctl, devices_info);
	default:
		BUG();
	}
}

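/*
 * Build the in-memory chunk mapping, add it to the mapping tree and create
 * the corresponding block group for the newly allocated chunk.
 */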
static int create_chunk(struct btrfs_trans_handle *trans,
			struct alloc_chunk_ctl *ctl,
			struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	u64 start = ctl->start;
	u64 type = ctl->type;
	int ret;
	int i;
	int j;

	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;
	map->num_stripes = ctl->num_stripes;

	for (i = 0; i < ctl->ndevs; ++i) {
		for (j = 0; j < ctl->dev_stripes; ++j) {
			int s = i * ctl->dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * ctl->stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = ctl->sub_stripes;

	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		return -ENOMEM;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = ctl->chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = ctl->stripe_size;

	em_tree = &info->mapping_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		return ret;
	}
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev,
					    dev->bytes_used + ctl->stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(ctl->stripe_size * map->num_stripes,
		     &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);
	check_raid1c34_incompat_flag(info, type);

	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);

	return ret;
}

int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device_info *devices_info = NULL;
	struct alloc_chunk_ctl ctl;
	int ret;

	lockdep_assert_held(&info->chunk_mutex);

	if (!alloc_profile_is_valid(type, 0)) {
		ASSERT(0);
		return -EINVAL;
	}

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
		ASSERT(0);
		return -EINVAL;
	}

	ctl.start = find_next_chunk(info);
	ctl.type = type;
	init_alloc_chunk_ctl(fs_devices, &ctl);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	ret = gather_device_info(fs_devices, &ctl, devices_info);
	if (ret < 0)
		goto out;

	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
	if (ret < 0)
		goto out;

	ret = create_chunk(trans, &ctl, devices_info);

out:
	kfree(devices_info);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
5307
}
5308

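/*
 * Allocate the initial metadata and system chunks; used when the first
 * writable device is added (for example when sprouting a seed filesystem).
 */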
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 alloc_profile;
	int ret;

	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = btrfs_alloc_chunk(trans, alloc_profile);
	if (ret)
		return ret;

	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = btrfs_alloc_chunk(trans, alloc_profile);
	return ret;
}

static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	const int index = btrfs_bg_flags_to_raid_index(map->type);

	return btrfs_raid_array[index].tolerated_failures;
}

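/*
 * Return 1 if the chunk at @chunk_offset cannot be written to: either a
 * stripe lives on a non-writeable device, or more devices are missing than
 * the RAID profile can tolerate. Return 0 otherwise.
 */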
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_tree_free(struct extent_map_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->lock);
		em = lookup_extent_mapping(tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(tree, em);
		write_unlock(&tree->lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

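/*
 * Return how many copies of the data at @logical can be read, based on the
 * RAID profile of the containing chunk (plus one while a device replace is
 * running, since the replace target holds an extra copy).
 */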
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

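/*
 * Pick a stripe to read from for a RAID1/RAID10 chunk: prefer a mirror
 * derived from the current pid, avoid the dev-replace source device when
 * possible, and fall back to any stripe with a present block device.
 */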
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	int i;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* Swap if parity is on a smaller index */
			if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
				swap(bbio->stripes[i], bbio->stripes[i + 1]);
				swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
				again = 1;
			}
		}
	}
}

static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
	bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}

/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that, discard won't be sent to target device of device
 * replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 *length_ret,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 length = *length_ret;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* discard always return a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	length = min_t(u64, em->start + em->len - logical, length);
	*length_ret = length;

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
5662
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729
				BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}

/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first.  At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}

static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}

/*
 * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
 *		       tuple. This information is used to calculate how big a
 *		       particular bio can get before it straddles a stripe.
 *
 * @fs_info - the filesystem
 * @logical - address that we want to figure out the geometry of
 * @len	    - the length of IO we are going to perform, starting at @logical
 * @op      - type of operation - write or read
 * @io_geom - pointer used to return values
 *
 * Returns < 0 in case a chunk for the given logical address cannot be found,
 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;
5931
	int ret = 0;
5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951

	ASSERT(op != BTRFS_MAP_DISCARD);

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			stripe_offset, offset, em->start, logical, stripe_len);
		ret = -EINVAL;
		goto out;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			unsigned long full_stripe_len = stripe_len * data_stripes;
			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
					full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
			 */
			if (op == BTRFS_MAP_WRITE) {
				max_len = stripe_len * data_stripes -
					  (offset - raid56_full_stripe_start);
			}
		}
		len = min_t(u64, em->len - offset, max_len);
	} else {
		len = em->len - offset;
	}

	io_geom->len = len;
	io_geom->offset = offset;
	io_geom->stripe_len = stripe_len;
	io_geom->stripe_nr = stripe_nr;
	io_geom->stripe_offset = stripe_offset;
	io_geom->raid56_stripe_offset = raid56_full_stripe_start;

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}

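/*
 * Core mapping routine: translate a logical range into the set of physical
 * stripes (device + offset) that back it, allocate a btrfs_bio describing
 * them and, for RAID56 with a raid_map, sort the parity stripes last.
 */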
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int data_stripes;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;
	struct btrfs_io_geometry geom;

	ASSERT(bbio_ret);
	ASSERT(op != BTRFS_MAP_DISCARD);

	ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
	if (ret < 0)
		return ret;

	em = btrfs_get_chunk_map(fs_info, logical, *length);
	ASSERT(!IS_ERR(em));
	map = em->map_lookup;

	*length = geom.len;
	stripe_len = geom.stripe_len;
	stripe_nr = geom.stripe_nr;
	stripe_offset = geom.stripe_offset;
	raid56_full_stripe_start = geom.raid56_stripe_offset;
	data_stripes = nr_data_stripes(map);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

6074
	num_stripes = 1;
6075
	stripe_index = 0;
6076
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6077 6078
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
6079
		if (!need_full_stripe(op))
6080
			mirror_num = 1;
6081
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6082
		if (need_full_stripe(op))
6083
			num_stripes = map->num_stripes;
6084
		else if (mirror_num)
6085
			stripe_index = mirror_num - 1;
6086
		else {
6087 6088
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
6089
			mirror_num = stripe_index + 1;
6090
		}
6091

6092
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6093
		if (need_full_stripe(op)) {
6094
			num_stripes = map->num_stripes;
6095
		} else if (mirror_num) {
6096
			stripe_index = mirror_num - 1;
6097 6098 6099
		} else {
			mirror_num = 1;
		}
6100

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6102
		u32 factor = map->num_stripes / map->sub_stripes;

6104
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

6107
		if (need_full_stripe(op))
6108
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
6111
		else {
			int old_stripe_index = stripe_index;
6113 6114 6115
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
6117
		}

6119
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6120
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
6122
			stripe_nr = div64_u64(raid56_full_stripe_start,
6123
					stripe_len * data_stripes);

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
6138
			stripe_nr = div_u64_rem(stripe_nr,
6139
					data_stripes, &stripe_index);
			if (mirror_num > 1)
6141
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
6144 6145
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
6146
			if (!need_full_stripe(op) && mirror_num <= 1)
6147
				mirror_num = 1;
		}
6149 6150
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
6154
		 */
6155 6156
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
6157
		mirror_num = stripe_index + 1;
6158
	}
6159
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}
6166

6167
	num_alloc_stripes = num_stripes;
6168
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6169
		if (op == BTRFS_MAP_WRITE)
6170
			num_alloc_stripes <<= 1;
6171
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6172
			num_alloc_stripes++;
6173
		tgtdev_indexes = num_stripes;
6174
	}
6175

6176
	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical = map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}

6189
	/* build raid_map */
6190 6191
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
6192
		u64 tmp;
6193
		unsigned rot;
6194 6195

		/* Work out the disk rotation on this stripe-set */
6196
		div_u64_rem(stripe_nr, num_stripes, &rot);
6197 6198

		/* Fill in the logical address of each stripe */
6199 6200
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;

6209
		sort_parity_stripes(bbio, num_stripes);
6210
	}

6212
	if (need_full_stripe(op))
6213
		max_errors = btrfs_chunk_max_errors(map);

6215
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6216
	    need_full_stripe(op)) {
6217 6218
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
6219 6220
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
6238
out:
6239
	if (dev_replace_is_ongoing) {
6240 6241
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
6242
		up_read(&dev_replace->rwsem);
6243
	}
6244
	free_extent_map(em);
	return ret;
6246 6247
}

6248
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6249
		      u64 logical, u64 *length,
6250
		      struct btrfs_bio **bbio_ret, int mirror_num)
6251
{
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     length, bbio_ret);

6256
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6257
				 mirror_num, 0);
6258 6259
}

6260
/* For Scrub/replace */
6261
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6262
		     u64 logical, u64 *length,
6263
		     struct btrfs_bio **bbio_ret)
6264
{
6265
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6266 6267
}

6268
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6269
{
6270 6271
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
6272
	bio_endio(bio);
6273

6274
	btrfs_put_bbio(bbio);
6275 6276
}

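/*
 * Per-stripe bio completion: record device errors, and once all stripes of
 * the btrfs_bio have finished, complete the original bio with an error only
 * if more stripes failed than the profile can tolerate.
 */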
static void btrfs_end_bio(struct bio *bio)
6278
{
6279
	struct btrfs_bio *bbio = bio->bi_private;
6280
	int is_orig_bio = 0;
6281

6282
	if (bio->bi_status) {
6283
		atomic_inc(&bbio->error);
6284 6285
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
6286
			struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6287

			ASSERT(dev->bdev);
			if (bio_op(bio) == REQ_OP_WRITE)
				btrfs_dev_stat_inc_and_print(dev,
6291
						BTRFS_DEV_STAT_WRITE_ERRS);
6292 6293
			else if (!(bio->bi_opf & REQ_RAHEAD))
				btrfs_dev_stat_inc_and_print(dev,
6294
						BTRFS_DEV_STAT_READ_ERRS);
6295 6296
			if (bio->bi_opf & REQ_PREFLUSH)
				btrfs_dev_stat_inc_and_print(dev,
6297
						BTRFS_DEV_STAT_FLUSH_ERRS);
6298 6299
		}
	}
6300

6301
	if (bio == bbio->orig_bio)
6302 6303
		is_orig_bio = 1;

6304 6305
	btrfs_bio_counter_dec(bbio->fs_info);

6306
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6307 6308
		if (!is_orig_bio) {
			bio_put(bio);
6309
			bio = bbio->orig_bio;
6310
		}
6311

6312
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6313
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
6315
		 */
6316
		if (atomic_read(&bbio->error) > bbio->max_errors) {
6317
			bio->bi_status = BLK_STS_IOERR;
6318
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
6323
			bio->bi_status = BLK_STS_OK;
6324
		}
6325

6326
		btrfs_end_bbio(bbio, bio);
6327
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

6332
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6333
			      u64 physical, struct btrfs_device *dev)
6334
{
6335
	struct btrfs_fs_info *fs_info = bbio->fs_info;
6336 6337

	bio->bi_private = bbio;
6338
	btrfs_io_bio(bio)->device = dev;
6339
	bio->bi_end_io = btrfs_end_bio;
6340
	bio->bi_iter.bi_sector = physical >> 9;
6341 6342 6343
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6344 6345
		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
		dev->devid, bio->bi_iter.bi_size);
6346
	bio_set_dev(bio, dev->bdev);
6347

6348
	btrfs_bio_counter_inc_noblocked(fs_info);
6349

6350
	btrfsic_submit_bio(bio);
}

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6357
		/* Should be the original bio. */
6358 6359
		WARN_ON(bio != bbio->orig_bio);

6360
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6361
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
6366
		btrfs_end_bbio(bbio, bio);
6367 6368 6369
	}
}

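/*
 * Map @bio to the physical stripes of its chunk and submit one clone per
 * stripe; RAID56 writes and recovery reads are handed off to the raid56
 * helpers instead.
 */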
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6371
			   int mirror_num)
6372 6373
{
	struct btrfs_device *dev;
6374
	struct bio *first_bio = bio;
6375
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
6379 6380
	int dev_nr;
	int total_devs;
6381
	struct btrfs_bio *bbio = NULL;
6382

6383
	length = bio->bi_iter.bi_size;
6384
	map_length = length;
6385

6386
	btrfs_bio_counter_inc_blocked(fs_info);
6387
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
6389
	if (ret) {
6390
		btrfs_bio_counter_dec(fs_info);
6391
		return errno_to_blk_status(ret);
6392
	}
6393

6394
	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
6398
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

6401
	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (bio_op(bio) == REQ_OP_WRITE) {
6406 6407
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
6409 6410
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}
6412

6413
		btrfs_bio_counter_dec(fs_info);
6414
		return errno_to_blk_status(ret);
	}

6417
	if (map_length < length) {
6418
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
6421 6422
		BUG();
	}
6423

6424
	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6425
		dev = bbio->stripes[dev_nr].dev;
6426 6427
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
6428 6429
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

6434
		if (dev_nr < total_devs - 1)
6435
			bio = btrfs_bio_clone(first_bio);
6436
		else
6437
			bio = first_bio;
6438

6439
		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6440
	}
6441
	btrfs_bio_counter_dec(fs_info);
6442
	return BLK_STS_OK;
6443 6444
}

/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * If @seed is true, traverse through the seed devices.
 */
6454
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6455 6456
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
6457
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *seed_devs;

	if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->devid == devid &&
			    (!uuid || memcmp(device->uuid, uuid,
					     BTRFS_UUID_SIZE) == 0))
				return device;
		}
	}

6470
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		if (!fsid ||
6472 6473
		    !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			list_for_each_entry(device, &seed_devs->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
		}
	}
6482

	return NULL;
6484 6485
}

6486
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6487 6488 6489
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
6490
	unsigned int nofs_flag;
6491

	/*
	 * We call this under the chunk_mutex, so we want to use NOFS for this
	 * allocation, however we don't want to change btrfs_alloc_device() to
	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
	 * places.
	 */
	nofs_flag = memalloc_nofs_save();
6499
	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6500
	memalloc_nofs_restore(nofs_flag);
6501
	if (IS_ERR(device))
6502
		return device;
6503 6504

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
6506
	fs_devices->num_devices++;
6507

6508
	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6509
	fs_devices->missing_devices++;
6510

6511 6512 6513
	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6524
 * on error.  Returned struct is not linked onto any lists and must be
6525
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

6534
	if (WARN_ON(!devid && !fs_info))
6535 6536
		return ERR_PTR(-EINVAL);

6537
	dev = __alloc_device(fs_info);
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
6548
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	return dev;
}

6562
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6563
					u64 devid, u8 *uuid, bool error)
6564
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
6571 6572
}

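/*
 * Compute the per-device stripe length of a chunk: divide the chunk length
 * by the number of data stripes implied by the RAID profile.
 */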
static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	int index = btrfs_bg_flags_to_raid_index(type);
	int ncopies = btrfs_raid_array[index].ncopies;
6577
	const int nparity = btrfs_raid_array[index].nparity;
6578 6579
	int data_stripes;

6580 6581 6582
	if (nparity)
		data_stripes = num_stripes - nparity;
	else
6583
		data_stripes = num_stripes / ncopies;
6584

6585 6586 6587
	return div_u64(chunk_len, data_stripes);
}

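/*
 * Build the in-memory chunk mapping (extent_map + map_lookup) for one chunk
 * item read from the chunk tree or the superblock sys_chunk_array, looking
 * up (or creating placeholders for) the devices of each stripe.
 */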
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6589 6590
			  struct btrfs_chunk *chunk)
{
6591
	struct btrfs_fs_info *fs_info = leaf->fs_info;
6592
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6612
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6613 6614 6615
		if (ret)
			return ret;
	}
6616

6617 6618 6619
	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, logical, 1);
	read_unlock(&map_tree->lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

6629
	em = alloc_extent_map();
6630 6631
	if (!em)
		return -ENOMEM;
6632
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

6638
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6639
	em->map_lookup = map;
6640 6641
	em->start = logical;
	em->len = length;
6642
	em->orig_start = 0;
6643
	em->block_start = 0;
	em->block_len = em->len;
6645

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6652
	map->verified_stripes = 0;
6653 6654
	em->orig_block_len = calc_stripe_length(map->type, em->len,
						map->num_stripes);
6655 6656 6657 6658
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6659 6660 6661
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
6662
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6663
							devid, uuid, NULL, true);
6664
		if (!map->stripes[i].dev &&
6665
		    !btrfs_test_opt(fs_info, DEGRADED)) {
6666
			free_extent_map(em);
6667
			btrfs_report_missing_device(fs_info, devid, uuid, true);
6668
			return -ENOENT;
6669
		}
6670 6671
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
6672 6673
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
6674
			if (IS_ERR(map->stripes[i].dev)) {
6675
				free_extent_map(em);
6676 6677 6678 6679
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
6680
			}
6681
			btrfs_report_missing_device(fs_info, devid, uuid, false);
6682
		}
6683 6684 6685
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&(map->stripes[i].dev->dev_state));

6686 6687
	}

6688 6689 6690
	write_lock(&map_tree->lock);
	ret = add_extent_mapping(map_tree, em, 0);
	write_unlock(&map_tree->lock);
6691 6692 6693 6694 6695
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
6696 6697
	free_extent_map(em);

6698
	return ret;
6699 6700
}

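/*
 * Copy the on-disk dev item fields into the in-memory btrfs_device.
 */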
static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
6708 6709
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
6710
	device->commit_total_bytes = device->disk_total_bytes;
6711
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6712
	device->commit_bytes_used = device->bytes_used;
6713 6714 6715 6716
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6717
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6718
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6719

6720
	ptr = btrfs_device_uuid(dev_item);
6721
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6722 6723
}

6724
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6725
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

6730
	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

6733
	/* This will match only for multi-device seed fs */
6734
	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
6735
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6736 6737
			return fs_devices;


6739
	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
6741
		if (!btrfs_test_opt(fs_info, DEGRADED))
6742 6743
			return ERR_PTR(-ENOENT);

6744
		fs_devices = alloc_fs_devices(fsid, NULL);
6745 6746 6747
		if (IS_ERR(fs_devices))
			return fs_devices;

6748
		fs_devices->seeding = true;
6749 6750
		fs_devices->opened = 1;
		return fs_devices;
	}

	/*
	 * Upon first call for a seed fs fsid, just create a private copy of the
	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
	 */
	fs_devices = clone_fs_devices(fs_devices);
6758 6759
	if (IS_ERR(fs_devices))
		return fs_devices;

6761
	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6762 6763
	if (ret) {
		free_fs_devices(fs_devices);
6764
		fs_devices = ERR_PTR(ret);
		goto out;
6766
	}

	if (!fs_devices->seeding) {
6769
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
6771
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

6775
	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
out:
6777
	return fs_devices;
}

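/*
 * Process one dev item from the chunk tree: find the matching device (opening
 * seed devices if the fsid differs), create a missing-device placeholder when
 * allowed, and fill in the device fields from the item.
 */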
static int read_one_dev(struct extent_buffer *leaf,
6781 6782
			struct btrfs_dev_item *dev_item)
{
6783
	struct btrfs_fs_info *fs_info = leaf->fs_info;
6784
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6785 6786 6787
	struct btrfs_device *device;
	u64 devid;
	int ret;
6788
	u8 fs_uuid[BTRFS_FSID_SIZE];
6789 6790
	u8 dev_uuid[BTRFS_UUID_SIZE];

6791
	devid = btrfs_device_id(leaf, dev_item);
6792
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6793
			   BTRFS_UUID_SIZE);
6794
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6795
			   BTRFS_FSID_SIZE);

6797
	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6798
		fs_devices = open_seed_devices(fs_info, fs_uuid);
6799 6800
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

6803
	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6804
				   fs_uuid, true);
6805
	if (!device) {
6806
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
6807 6808
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, true);
6809
			return -ENOENT;
6810
		}

6812
		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				"failed to add missing dev %llu: %ld",
				devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
6819
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6820
	} else {
6821
		if (!device->bdev) {
6822 6823 6824
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
6825
				return -ENOENT;
6826 6827 6828
			}
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, false);
6829
		}
6830

6831 6832
		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
6839
			device->fs_devices->missing_devices++;
6840
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
6842 6843 6844

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
6845 6846
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

6859
	if (device->fs_devices != fs_info->fs_devices) {
6860
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
6864
	}
6865 6866

	fill_device_from_item(leaf, dev_item, device);
6867
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6868
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6869
	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
6871 6872
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
6873
	}
6874 6875 6876 6877
	ret = 0;
	return ret;
}

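/*
 * Read the chunks stored in the superblock sys_chunk_array so that the
 * system chunks can be mapped before the chunk tree itself is readable.
 */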
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6879
{
6880
	struct btrfs_root *root = fs_info->tree_root;
6881
	struct btrfs_super_block *super_copy = fs_info->super_copy;
6882
	struct extent_buffer *sb;
6883 6884
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
6885 6886
	u8 *array_ptr;
	unsigned long sb_array_offset;
6887
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
6891
	u32 cur_offset;
6892
	u64 type;
6893
	struct btrfs_key key;
6894

6895
	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
6901
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system
	 * array.  The set_extent_buffer_uptodate() call does not properly
	 * mark all its pages up-to-date when the page is larger: the extent
	 * does not cover the whole page and consequently check_page_uptodate
	 * does not find all the page's extents up-to-date (the hole beyond
	 * sb), and write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through the mark_extent_buffer_dirty/
	 * writeback cycle, but sb spans only this function. Add an explicit
	 * SetPageUptodate call to silence the warning e.g. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
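
/*
 * Illustrative sketch, not used by the code above: each entry of the
 * sys_chunk_array walked in btrfs_read_sys_array() is a packed pair of
 * (struct btrfs_disk_key, struct btrfs_chunk), so the stride from one entry
 * to the next is the key size plus the variable chunk item size for that
 * entry's stripe count.  The helper name here is hypothetical.
 */
static inline unsigned long sys_array_entry_size(int num_stripes)
{
	return sizeof(struct btrfs_disk_key) +
	       btrfs_chunk_item_size(num_stripes);
}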

/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}
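
/*
 * Minimal sketch of the per-profile failure tolerance used above: for this
 * purpose btrfs_get_num_tolerated_disk_barrier_failures() is effectively a
 * lookup of the tolerated_failures field in btrfs_raid_array.  The helper
 * name below is hypothetical and it is not called anywhere in this file.
 */
static inline int profile_tolerated_failures(u64 bg_flags)
{
	const int index = btrfs_bg_flags_to_raid_index(bg_flags);

	return btrfs_raid_array[index].tolerated_failures;
}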

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++) {
		u64 start;

		start = btrfs_node_blockptr(node, i);
		readahead_tree_block(node->fs_info, start);
	}
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * The uuid_mutex is needed only if we are mounting a sprout FS;
	 * otherwise it is not required.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		/*
		 * The nodes on level 1 are not locked, but we don't need to
		 * lock them during mount time as nothing else can access the
		 * tree.
		 */
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			mutex_lock(&fs_info->chunk_mutex);
			ret = read_one_chunk(&found_key, leaf, chunk);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading the chunk tree we've got all device information,
	 * so do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
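
/*
 * Illustrative sketch of the key ordering btrfs_read_chunk_tree() relies on:
 * all device items sort before any chunk item because their objectid,
 * BTRFS_DEV_ITEMS_OBJECTID (1), is smaller than
 * BTRFS_FIRST_CHUNK_TREE_OBJECTID (256), the objectid of every chunk item.
 * The helper below is hypothetical and exists only to spell that out.
 */
static inline bool chunk_tree_key_is_dev_item(const struct btrfs_key *key)
{
	return key->objectid == BTRFS_DEV_ITEMS_OBJECTID &&
	       key->type == BTRFS_DEV_ITEM_KEY;
}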

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;
	mutex_unlock(&fs_devices->device_list_mutex);

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		mutex_lock(&seed_devs->device_list_mutex);
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&seed_devs->device_list_mutex);

		seed_devs->fs_info = fs_info;
	}
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
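
/*
 * Sketch of the byte offset the two accessors above compute for value 'index'
 * of a dev_stats item located at 'ptr' inside an extent buffer.  This helper
 * is hypothetical and only makes the pointer arithmetic explicit.
 */
static inline unsigned long btrfs_dev_stats_value_offset(
				const struct btrfs_dev_stats_item *ptr,
				int index)
{
	return (unsigned long)ptr +
	       offsetof(struct btrfs_dev_stats_item, values) +
	       index * sizeof(u64);
}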

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
				btrfs_dev_stat_set(device, i, 0);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_set(device, i, 0);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;


		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed.  This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here.  This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
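
/*
 * Illustrative use of the factor above (hypothetical helper, not called in
 * this file): for the simple profiles a block group of 'len' logical bytes
 * consumes roughly len * ncopies bytes of raw device space.
 */
static inline u64 bg_raw_bytes(u64 flags, u64 len)
{
	return len * btrfs_bg_type_to_factor(flags);
}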
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond the device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		struct btrfs_fs_devices *devs;

		devs = list_first_entry(&fs_info->fs_devices->seed_list,
					struct btrfs_fs_devices, seed_list);
		dev = btrfs_find_device(devs, devid, NULL, NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
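
/*
 * Minimal sketch of the overlap rule enforced in the loop above: two dev
 * extents on the same device must not overlap, i.e. each extent has to start
 * at or after the end of the previous one.  Hypothetical helper, not called
 * anywhere in this file.
 */
static inline bool dev_extents_overlap(u64 prev_devid, u64 prev_end,
				       u64 devid, u64 start)
{
	return devid == prev_devid && start < prev_end;
}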

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
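
/*
 * Example of how a caller might consult the check above before relocating or
 * removing a block group (sketch only; by convention swapfile-pinned objects
 * are reported with -ETXTBSY):
 *
 *	if (btrfs_pinned_by_swapfile(fs_info, block_group))
 *		return -ETXTBSY;
 */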