volumes.c 205.4 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2 3 4
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */
5

6 7
#include <linux/sched.h>
#include <linux/bio.h>
8
#include <linux/slab.h>
9
#include <linux/buffer_head.h>
10
#include <linux/blkdev.h>
11
#include <linux/ratelimit.h>
I
Ilya Dryomov 已提交
12
#include <linux/kthread.h>
D
David Woodhouse 已提交
13
#include <linux/raid/pq.h>
S
Stefan Behrens 已提交
14
#include <linux/semaphore.h>
15
#include <linux/uuid.h>
A
Anand Jain 已提交
16
#include <linux/list_sort.h>
17 18 19 20 21 22
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
D
David Woodhouse 已提交
23
#include "raid56.h"
24
#include "async-thread.h"
25
#include "check-integrity.h"
26
#include "rcu-string.h"
27
#include "math.h"
28
#include "dev-replace.h"
29
#include "sysfs.h"
30
#include "tree-checker.h"
31
#include "space-info.h"
32
#include "block-group.h"
33

Z
Zhao Lei 已提交
34 35 36 37 38 39
/*
 * Per-profile properties for each BTRFS RAID level, indexed by the
 * BTRFS_RAID_* enum.  Consumers read allocation constraints
 * (devs_min/devs_max/devs_increment), redundancy characteristics
 * (ncopies, nparity, tolerated_failures) and the matching on-disk
 * block group flag (bg_flag).
 */
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,	/* mirrored copies inside one stripe */
		.dev_stripes	= 1,	/* stripes per device in one chunk */
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1, /* max devices that may be lost */
		.devs_increment	= 2,	/* devices are added in pairs */
		.ncopies	= 2,	/* complete copies of the data */
		.nparity        = 0,	/* parity stripes (raid5/6 only) */
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,	/* both copies live on one device */
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,	/* 0 == no minimum-device error code */
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,	/* single has no dedicated bg flag */
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,	/* one parity stripe */
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,	/* two parity stripes (P and Q) */
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

128
/*
 * Map block group @flags to the human readable name of the corresponding
 * RAID profile, or NULL when the flags resolve to no known profile.
 */
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int idx = btrfs_bg_flags_to_raid_index(flags);

	return (idx < BTRFS_NR_RAID_TYPES) ? btrfs_raid_array[idx].raid_name
					   : NULL;
}

138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 *
 * Known flags are printed as symbolic names separated by '|'; any leftover
 * unknown bits are appended as a hex value.  On overflow the text is simply
 * truncated; it is up to the caller to provide a sufficiently large buffer.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)						\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		/*
		 * Leftover bits without a symbolic name, print them raw.
		 * Check the snprintf result the same way DESCRIBE_FLAG does:
		 * subtracting a negative or truncated return value from the
		 * u32 size_bp would underflow it and break the trailing '|'
		 * removal below.
		 */
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		if (ret < 0 || ret >= size_bp)
			goto out_overflow;
		size_bp -= ret;
		bp += ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}

192
static int init_first_rw_device(struct btrfs_trans_handle *trans);
193
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
194
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
195
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
196 197 198 199 200
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);
Y
Yan Zheng 已提交
201

D
David Sterba 已提交
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
218
 * seeding, structure cloning, opening/closing devices at mount/umount time
D
David Sterba 已提交
219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
243 244 245
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
D
David Sterba 已提交
246 247 248 249 250 251 252 253 254 255 256 257 258 259 260
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   volume_mutex
 *     device_list_mutex
 *       chunk_mutex
 *     balance_mutex
261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
D
David Sterba 已提交
296 297
 */

298
/* Serializes device scanning/registration; protects the fs_uuids list. */
DEFINE_MUTEX(uuid_mutex);
/* Global list of all btrfs_fs_devices known from scanning, one per fsid. */
static LIST_HEAD(fs_uuids);
/* Accessor for the global fs_uuids list (used outside this file). */
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
304

D
David Sterba 已提交
305 306
/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	/* Without a separate metadata UUID the two UUIDs are identical. */
	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

339
/*
 * Free a btrfs_device together with the resources it owns: the RCU name
 * string, the alloc_state extent io tree and the preallocated flush bio.
 * The device must already be off any transaction's post_commit_list.
 */
void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

Y
Yan Zheng 已提交
348 349 350 351 352 353 354 355
/*
 * Release every btrfs_device hanging off @fs_devices and then the structure
 * itself.  Must only be called on an fs_devices that is not opened.
 */
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		/* unlink before freeing so the list stays consistent */
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

361
/*
 * Module exit teardown: free every fs_devices (and all devices on it) still
 * registered on the global fs_uuids list.
 */
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

373 374 375
/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	/* readahead radix trees: GFP flags forbid direct reclaim */
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);

	return dev;
}

412 413
/*
 * Find the registered fs_devices matching @fsid (and, when non-NULL,
 * @metadata_fsid), taking in-progress fsid changes (CHANGING_FSID_V2)
 * into account.  Walks the global fs_uuids list (protected by uuid_mutex,
 * see the locking notes in this file).  Returns NULL when nothing matches.
 */
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	if (metadata_fsid) {
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by first scanning
		 * a device which didn't have its fsid/metadata_uuid changed
		 * at all and the CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(metadata_fsid, fs_devices->fsid,
				   BTRFS_FSID_SIZE) == 0 &&
			    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by a device that
		 * has an outdated pair of fsid/metadata_uuid and
		 * CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(fs_devices->metadata_uuid,
				   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
			    memcmp(metadata_fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
	}

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482
/*
 * Open the block device at @device_path with @flags/@holder, set the btrfs
 * block size, optionally flush dirty pages first (@flush) and read the
 * primary super block into *@bh.  On any failure *@bdev and *@bh are set to
 * NULL and a negative errno is returned; on success both are valid and the
 * caller owns releasing them.
 */
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	/* drop stale page cache after changing the block size */
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

504 505 506 507 508 509 510 511 512 513 514 515 516 517
/*
 * Splice the bio chain @head..@tail back onto the front of @pending_bios,
 * keeping any bios still queued there behind the requeued ones.
 */
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{
	if (pending_bios->tail)
		tail->bi_next = pending_bios->head;
	else
		pending_bios->tail = tail;

	pending_bios->head = head;
}

518 519 520 521 522 523 524 525 526 527 528
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the schedulers ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		/* alternate: next round must pick the regular list */
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	/* detach the grabbed chain; it is processed without the lock */
	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		/* pop the first bio off the detached chain */
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			/* hand the remainder back to the worker pool */
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	/* final re-check under the lock so no late arrivals are lost */
	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

702
/* Work item entry point: drain the scheduled bios of the owning device. */
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

710 711 712 713 714 715 716 717 718 719 720
/*
 * Return true when @device's registered name equals @path.  The name is
 * dereferenced under the RCU read lock.
 */
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	bool matched;

	rcu_read_lock();
	matched = (strcmp(rcu_str_deref(device->name), path) == 0);
	rcu_read_unlock();

	return matched;
}

721 722 723 724 725 726 727
/*
 *  Search and remove all stale (devices which are not mounted) devices.
 *  When both inputs are NULL, it will search and release all stale devices.
 *  path:	Optional. When provided will it release all unmounted devices
 *		matching this path only.
 *  skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 *  Return:	0 for success or if @path is NULL.
 * 		-EBUSY if @path is a mounted device.
 * 		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	/* default to "no such device" until a match is found */
	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		/* drop the whole fs_devices once its last device is gone */
		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812
/*
 * Open the block device backing @device, validate its super block against
 * the expected devid/uuid and update device and fs_devices state (seeding,
 * writeable, rotating, open/rw counters).  Returns 0 on success or -EINVAL
 * when the device cannot be opened or does not match.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	/* already open, or nothing to open */
	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	/* the on-disk super block must match the device we expect */
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		/* seeding and metadata-uuid are mutually exclusive */
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	/* the replace target device is never an allocation candidate */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874
/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change)
			continue;
		/* only candidates whose two UUIDs already diverged qualify */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0)
			continue;
		if (memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898
/*
 * Handles the case where the scanned device is part of an fs that had
 * multiple successful changes of FSID but currently this device didn't
 * observe it. Meaning our fsid will be different than theirs.
 */
static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0)
			continue;
		if (memcmp(fs_devices->metadata_uuid,
			   disk_super->metadata_uuid, BTRFS_FSID_SIZE) != 0)
			continue;
		/* metadata UUID matches but the fsid does not: FSID changed */
		if (memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;
	}

	return NULL;
}
899 900 901 902
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 *
 * NOTE(review): this walks and modifies the global fs_uuids list, so it
 * presumably runs under uuid_mutex (see the locking notes in this file) -
 * confirm at the call sites.
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	/* pick the fs_devices lookup strategy based on the super block flags */
	if (fsid_change_in_progress) {
		if (!has_metadata_uuid) {
			/*
			 * When we have an image which has CHANGING_FSID_V2 set
			 * it might belong to either a filesystem which has
			 * disks with completed fsid change or it might belong
			 * to fs with no UUID changes in effect, handle both.
			 */
			fs_devices = find_fsid_inprogress(disk_super);
			if (!fs_devices)
				fs_devices = find_fsid(disk_super->fsid, NULL);
		} else {
			fs_devices = find_fsid_changed(disk_super);
		}
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid(disk_super->fsid,
				       disk_super->metadata_uuid);
	} else {
		fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		/* first device of this filesystem we have ever seen */
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (has_metadata_uuid && fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
					disk_super->metadata_uuid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		/* new devices cannot be registered on a mounted filesystem */
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be a spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid.We keep the one
			 * with larger generation number or the last-in if
			 * generation are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(device->fs_info,
			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
					disk_super->fsid, devid,
					rcu_str_deref(device->name), path);
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
				"device fsid %pU devid %llu moved old:%s new:%s",
				disk_super->fsid, devid,
				rcu_str_deref(device->name), path);
		}

		/* allowed rename: swap in the new path string */
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

Y
Yan Zheng 已提交
1113 1114 1115 1116 1117 1118
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

1119
	fs_devices = alloc_fs_devices(orig->fsid, NULL);
1120 1121
	if (IS_ERR(fs_devices))
		return fs_devices;
Y
Yan Zheng 已提交
1122

1123
	mutex_lock(&orig->device_list_mutex);
J
Josef Bacik 已提交
1124
	fs_devices->total_devices = orig->total_devices;
Y
Yan Zheng 已提交
1125 1126

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1127 1128
		struct rcu_string *name;

1129 1130 1131
		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
Y
Yan Zheng 已提交
1132 1133
			goto error;

1134 1135 1136 1137
		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
1138
		if (orig_dev->name) {
1139 1140
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
1141
			if (!name) {
1142
				btrfs_free_device(device);
1143 1144 1145
				goto error;
			}
			rcu_assign_pointer(device->name, name);
J
Julia Lawall 已提交
1146
		}
Y
Yan Zheng 已提交
1147 1148 1149 1150 1151

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
1152
	mutex_unlock(&orig->device_list_mutex);
Y
Yan Zheng 已提交
1153 1154
	return fs_devices;
error:
1155
	mutex_unlock(&orig->device_list_mutex);
Y
Yan Zheng 已提交
1156 1157 1158 1159
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

1160 1161 1162 1163 1164
/*
 * After we have read the system tree and know devids belonging to
 * this filesystem, remove the device which does not belong there.
 *
 * @step == 0: before the dev-replace state is known, keep a potential
 * replace target; later steps may drop it (see comment in the loop).
 * Walks the seed device lists as well (the "goto again" chain).
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
							&device->dev_state)) {
			/* Track the highest-generation non-replace device */
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
			     &device->dev_state) &&
			     (!latest_dev ||
			      device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		/* Device does not belong here: close, unlink and free it */
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	/*
	 * NOTE(review): latest_dev is only set while scanning devices marked
	 * IN_FS_METADATA; presumably at least one such device always exists
	 * here — confirm, otherwise this dereferences NULL.
	 */
	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}
1226

1227 1228
static void btrfs_close_bdev(struct btrfs_device *device)
{
D
David Sterba 已提交
1229 1230 1231
	if (!device->bdev)
		return;

1232
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1233 1234 1235 1236
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

D
David Sterba 已提交
1237
	blkdev_put(device->bdev, device->mode);
1238 1239
}

1240
/*
 * Close @device and replace its entry in the fs_devices list with a freshly
 * allocated, closed placeholder carrying the same devid/uuid/name, so the
 * list stays usable for a later re-open.  The old struct is freed after an
 * RCU grace period.
 */
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);

	/*
	 * NOTE(review): allocation failure on the close path is handled by
	 * BUG_ON below — there is no way to report an error from here.
	 */
	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	/* Wait for RCU readers of the old entry before freeing it */
	synchronize_rcu();
	btrfs_free_device(device);
}

1278
/*
 * Drop one open reference on @fs_devices.  When the last reference is
 * dropped, close every member device and reset the opened/seeding state.
 * Always returns 0.
 */
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	/* Still opened by someone else, just drop our reference */
	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/* All devices should have been closed by the loop above */
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

Y
Yan Zheng 已提交
1299 1300
/*
 * Close @fs_devices and, if this drops the last open reference, tear down
 * and free the whole chain of seed fs_devices hanging off it.  The seed
 * chain is detached under uuid_mutex but freed outside of it.
 */
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		/* Detach the seed chain so it can be freed unlocked below */
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

1321
/*
 * Open every device in @fs_devices exclusively with @flags/@holder.
 * Individual open failures are ignored; -EINVAL is returned only when no
 * device at all could be opened.  On success the device with the highest
 * generation becomes latest_bdev.
 */
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

A
Anand Jain 已提交
1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

Y
Yan Zheng 已提交
1364
/*
 * Open @fs_devices (or take another reference if it is already open).
 * On first open the device list is sorted by devid before opening.
 * Caller must hold uuid_mutex.
 */
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		/* Already open — just bump the reference count */
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

1384
/* Unmap and drop the superblock page pinned by btrfs_read_disk_super(). */
static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

1390 1391 1392
/*
 * Read the btrfs super block at @bytenr of @bdev through the page cache.
 * On success returns 0 with *page pinned+mapped (release with
 * btrfs_release_disk_super()) and *disk_super pointing into it; returns 1
 * on any failure or if the block is not a valid btrfs super.
 */
static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + offset_in_page(bytenr);

	/* reject anything that is not a btrfs super at the expected offset */
	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	/* ensure the label is NUL terminated before anyone prints it */
	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
/*
 * Drop cached stale device records under uuid_mutex.  An empty @path means
 * "forget all eligible devices"; otherwise only the record matching @path
 * is considered.
 */
int btrfs_forget_devices(const char *path)
{
	const char *target = strlen(path) ? path : NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(target, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

1446 1447 1448 1449 1450
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	struct page *page;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		device = ERR_PTR(-EINVAL);
		goto error_bdev_put;
	}

	/* Register (or update) the device in the global device lists */
	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		/* A new record may shadow stale entries for the same path */
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(page);

error_bdev_put:
	/* The scan only borrows the bdev; always drop it before returning */
	blkdev_put(bdev, flags);

	return device;
}
1494

1495 1496 1497 1498 1499 1500
/*
 * Check whether an already-allocated chunk intersects the byte range
 * [*start, *start + len) on @device.  When an intersection is found,
 * advance *start to one byte past the allocated region and return true;
 * otherwise return false and leave *start untouched.
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 alloc_start;
	u64 alloc_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	/* No allocated chunk at or after *start: nothing can intersect */
	if (find_first_extent_bit(&device->alloc_state, *start,
				  &alloc_start, &alloc_end,
				  CHUNK_ALLOCATED, NULL))
		return false;

	if (in_range(alloc_start, *start, len) ||
	    in_range(*start, alloc_start, alloc_end - alloc_start)) {
		*start = alloc_end + 1;
		return true;
	}

	return false;
}


1521
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space. that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as avaiable.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	/* Search the commit root without locking (see NOTE above) */
	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* Step back to the extent that may cover search_start */
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	/* Walk all dev extents of this device, tracking the largest hole */
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(device, &search_start,
						    hole_size)) {
				if (key.offset >= search_start)
					hole_size = key.offset - search_start;
				else
					hole_size = 0;
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(device, &search_start, hole_size)) {
			/* search_start moved forward; redo the tree walk */
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

1705
/*
 * Convenience wrapper: search for @num_bytes of free space on @device
 * starting from offset 0.  See find_free_dev_extent_start() for the
 * semantics of @start and @len.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

1712
/*
 * Delete the dev extent item of @device that covers byte @start and return
 * its length in *dev_extent_len.  If the search lands past the item, step
 * back to the covering extent and retry (the "goto again" path).
 */
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		/* No exact match: the previous item must cover @start */
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		/* Re-search with the exact key so the delete hits it */
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		/* Freed space becomes available after this transaction */
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

1771 1772 1773
/*
 * Insert a dev extent item for @device describing @num_bytes at device
 * offset @start, belonging to the chunk at @chunk_offset.
 * Returns 0 on success or a negative errno.
 */
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	/* Allocating on a removed or replace-target device is a bug */
	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

1813
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1814
{
1815 1816 1817 1818
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;
1819

1820
	em_tree = &fs_info->mapping_tree;
1821
	read_lock(&em_tree->lock);
L
Liu Bo 已提交
1822
	n = rb_last(&em_tree->map.rb_root);
1823 1824 1825
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
1826
	}
1827 1828
	read_unlock(&em_tree->lock);

1829 1830 1831
	return ret;
}

1832 1833
/*
 * Find the next unused devid by locating the highest existing DEV_ITEM key
 * in the chunk tree.  Stores (highest devid + 1), or 1 if no device item
 * exists, in *devid_ret.  Returns 0 on success or a negative errno.
 */
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Seek past the largest possible devid, then step back one item */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		/* No device item at all: devids start at 1 */
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	/* Copy the in-memory device description into the new item */
	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	/* Device UUID and filesystem (metadata) UUID follow the fixed fields */
	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
1927

1928 1929 1930 1931
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
1932
static void update_dev_time(const char *path_name)
1933 1934 1935 1936
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
1937
	if (IS_ERR(filp))
1938 1939 1940 1941 1942
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

1943
/*
 * Delete the DEV_ITEM of @device from the chunk tree in its own
 * transaction.  The transaction is aborted on failure and committed only
 * when the deletion succeeded.
 */
static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		/* Item not found counts as an error here */
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	/* Only commit when the item was actually removed */
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

1986 1987 1988 1989 1990 1991 1992
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the
 * whole filesystem.  It's up to the caller to adjust that number regarding
 * e.g. device replace.  Returns 0 when all in-use profiles are satisfied,
 * otherwise the profile-specific mindev error code.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	/* Take a consistent snapshot of the allocation profile bits */
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		const struct btrfs_raid_attr *attr = &btrfs_raid_array[i];

		/* Skip profiles that are not in use on this filesystem */
		if (!(all_avail & attr->bg_flag))
			continue;

		if (num_devices < attr->devs_min && attr->mindev_error)
			return attr->mindev_error;
	}

	return 0;
}

2021 2022
static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2023
{
Y
Yan Zheng 已提交
2024
	struct btrfs_device *next_device;
2025 2026 2027

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
2028 2029
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device, in the context
 * where this function called, there should be always be another device (or
 * this_dev) which is active.
 */
2042 2043
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev)
2044
{
2045
	struct btrfs_fs_info *fs_info = device->fs_info;
2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

2063 2064 2065 2066 2067 2068 2069 2070
/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

2071
	down_read(&fs_info->dev_replace.rwsem);
2072 2073 2074 2075
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
2076
	up_read(&fs_info->dev_replace.rwsem);
2077 2078 2079 2080

	return num_devices;
}

2081 2082
/*
 * Remove a device from a mounted filesystem.
 *
 * @fs_info:     the filesystem to remove the device from
 * @device_path: path of the device, or "missing" to remove the first
 *               device that is recorded in metadata but has no bdev
 * @devid:       device id to remove; when non-zero it takes precedence
 *               over @device_path (see btrfs_find_device_by_devspec())
 *
 * Returns 0 on success, a negative errno, or one of the positive
 * BTRFS_ERROR_DEV_* codes consumed by the ioctl layer.
 *
 * Serialized against other device add/remove operations by uuid_mutex.
 * Note that uuid_mutex is deliberately dropped around btrfs_shrink_device()
 * because shrinking relocates data and can block for a long time.
 */
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = btrfs_num_devices(fs_info);

	/* Refuse removal if it would drop below the RAID profile minimum. */
	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		/* Give "missing" lookups a dedicated, user-visible error. */
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	/* An active swapfile pins its device; removal would corrupt swap. */
	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
		  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	/* A replace target is managed by the replace code, not by us. */
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	/* Cannot remove the last writeable device of a mounted fs. */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	/*
	 * Take the device out of the allocation list first so no new chunks
	 * are placed on it while we shrink it; undone via error_undo.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	/*
	 * Shrinking to 0 relocates all extents off the device and may take a
	 * long time, so drop uuid_mutex for its duration.
	 */
	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	/* Repoint s_bdev/latest_bdev away from the device being removed. */
	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(fs_devices, device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(device->bdev, device->name->str);

	btrfs_close_bdev(device);
	/* Wait for RCU readers of the device list before freeing. */
	synchronize_rcu();
	btrfs_free_device(device);

	/*
	 * If this was the last open device of a seed fs_devices, unlink the
	 * seed from the sprout chain and free it entirely.
	 */
	if (cur_devices->open_devices == 0) {
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

out:
	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	/* Put the device back on the allocation list; mirrors the del above. */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}

2233
/*
 * Unlink the replace source device @srcdev from its fs_devices lists and
 * adjust the device counters. Called during the final phase of device
 * replace; the caller must hold the top-level device_list_mutex (checked
 * via lockdep below). Freeing of the device happens later in
 * btrfs_rm_dev_replace_free_srcdev().
 */
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	/* Keep the missing/rw/open counters consistent with the removal. */
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

2260
/*
 * Finish disposal of the replace source device @srcdev: scratch its
 * superblocks (if writeable), close its bdev and free it. If this empties
 * a seed fs_devices, unlink that fs_devices from the sprout chain and free
 * it as well. Counterpart of btrfs_rm_dev_replace_remove_srcdev(), which
 * must already have unlinked @srcdev from the lists.
 */
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_info *fs_info = srcdev->fs_info;
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);
	/* Wait out RCU readers of the device list before freeing. */
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* if this is no devs we rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		/* Unlink fs_devices from the seed chain of the mounted fs. */
		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

2300
/*
 * Tear down a device-replace target device @tgtdev: remove it from sysfs
 * and the device lists (under device_list_mutex), retarget s_bdev/
 * latest_bdev if needed, then scratch its superblocks and free it outside
 * the lock (see the comment below for why the lock must not be held).
 */
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	/* Repoint s_bdev/latest_bdev away from the target being destroyed. */
	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() with in btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	/* Wait for RCU readers of the device list before freeing. */
	synchronize_rcu();
	btrfs_free_device(tgtdev);
}

2334 2335
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
2336 2337 2338 2339 2340 2341 2342
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;
2343
	struct btrfs_device *device;
2344 2345

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2346
				    fs_info->bdev_holder, 0, &bdev, &bh);
2347
	if (ret)
2348
		return ERR_PTR(ret);
2349 2350 2351
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
2352
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2353
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2354
					   disk_super->metadata_uuid, true);
2355
	else
2356
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2357
					   disk_super->fsid, true);
2358

2359
	brelse(bh);
2360 2361
	if (!device)
		device = ERR_PTR(-ENOENT);
2362
	blkdev_put(bdev, FMODE_READ);
2363
	return device;
2364 2365
}

2366 2367 2368
/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
2369
struct btrfs_device *btrfs_find_device_by_devspec(
2370 2371
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
2372
{
2373
	struct btrfs_device *device;
2374

2375
	if (devid) {
2376
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2377
					   NULL, true);
2378 2379
		if (!device)
			return ERR_PTR(-ENOENT);
2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
2393
		}
2394
		return ERR_PTR(-ENOENT);
2395
	}
2396 2397

	return btrfs_find_device_by_path(fs_info, device_path);
2398 2399
}

Y
Yan Zheng 已提交
2400 2401 2402
/*
 * does all the dirty work required for changing file system's UUID.
 *
 * Turns the mounted seed filesystem into a sprout: clones the current
 * fs_devices as the on-list record of the seed, moves all existing devices
 * onto a new private seed_devices structure, generates a fresh fsid for the
 * (now writeable) sprout and clears the SEEDING flag in the super block
 * copy. Caller must hold uuid_mutex (asserted below).
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	lockdep_assert_held(&uuid_mutex);
	/* Only a seed filesystem can be sprouted. */
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	/* Keep a clone of the original devices on the global fs_uuids list. */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	/*
	 * seed_devices starts as a copy of fs_devices, then takes over its
	 * device lists; fs_devices itself is reset to represent the sprout.
	 */
	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	/* alloc_list is protected by chunk_mutex, not device_list_mutex. */
	mutex_lock(&fs_info->chunk_mutex);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	mutex_unlock(&fs_info->chunk_mutex);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	/* The sprout gets a brand new fsid; metadata_uuid mirrors it. */
	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 *
 * Walks every DEV_ITEM in the chunk tree and, for devices that belong to a
 * seed fs_devices, rewrites the on-disk generation with the in-memory one.
 * Runs inside the sprout transaction started by btrfs_init_new_device().
 *
 * Returns 0 on success or a negative errno from the tree search.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Start from the first possible DEV_ITEM key. */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		/* End of leaf: advance, or re-search from the next key. */
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* Stop once we run past the DEV_ITEM range. */
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   fs_uuid, true);
		BUG_ON(!device); /* Logic error */

		/* Only seed devices need their generation recorded. */
		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

2538
/*
 * Add a new device at @device_path to the mounted filesystem @fs_info.
 *
 * Handles both the regular "btrfs device add" case and sprouting a seed
 * filesystem (in which case s_umount and uuid_mutex are taken and the fs
 * is flipped read-write). The new device is registered in memory, the
 * super block totals are bumped, a DEV_ITEM is inserted, and for seeds the
 * sprout bookkeeping (btrfs_prepare_sprout()/btrfs_finish_sprout()) runs
 * inside the same transaction.
 *
 * Returns 0 on success or a negative errno; on failure all in-memory and
 * super-block changes are rolled back via the error_* labels.
 */
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct super_block *sb = fs_info->sb;
	struct rcu_string *name;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int seeding_dev = 0;
	int ret = 0;
	bool unlocked = false;

	/* A read-only mount can only gain a device when sprouting a seed. */
	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	/* Reject a device that is already part of this filesystem. */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	device = btrfs_alloc_device(fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto error_free_device;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_device;
	}

	/* Initialize the in-memory device from the fs geometry and bdev. */
	q = bdev_get_queue(bdev);
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
					 fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		/* Sprouting makes the fs writeable. */
		sb->s_flags &= ~SB_RDONLY;
		ret = btrfs_prepare_sprout(fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	device->fs_devices = fs_devices;

	/* Publish the device and bump all counters under both locks. */
	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	/* Remember the old totals so the error path can restore them. */
	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/* Expose the sprout's new fsid in sysfs. */
		btrfs_sysfs_update_sprout_fsid(fs_devices,
				fs_info->fs_devices->fsid);
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		unlocked = true;

		if (ret) /* transaction commit */
			return ret;

		/*
		 * Move system chunks off the (read-only) seed; a failure
		 * here is recoverable by the user via balance.
		 */
		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_sysfs:
	btrfs_sysfs_rm_device_link(fs_devices, device);
	/* Roll back every counter and super-copy change made above. */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		sb->s_flags |= SB_RDONLY;
	if (trans)
		btrfs_end_transaction(trans);
error_free_device:
	btrfs_free_device(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev && !unlocked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

C
Chris Mason 已提交
2756 2757
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
2758 2759 2760
{
	int ret;
	struct btrfs_path *path;
2761
	struct btrfs_root *root = device->fs_info->chunk_root;
2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2791 2792 2793 2794
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
2795 2796 2797 2798 2799 2800 2801
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

M
Miao Xie 已提交
2802
/*
 * Grow @device to @new_size (rounded down to the sector size), updating the
 * super-copy total, the fs_devices rw byte count and the device's own sizes
 * under chunk_mutex, then persisting the new DEV_ITEM.
 *
 * Returns 0 on success, -EACCES if the device is not writeable, -EINVAL if
 * @new_size does not actually grow the device or the device is a replace
 * target, or an errno from btrfs_update_device().
 */
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total;
	u64 diff;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

	/* Only growing is allowed here, and never on a replace target. */
	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	/* Queue the device for the post-commit size update if not already. */
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
	mutex_unlock(&fs_info->chunk_mutex);

	return btrfs_update_device(trans, device);
}

2840
/*
 * Delete the CHUNK_ITEM at @chunk_offset from the chunk tree.
 *
 * Returns 0 on success, -ENOENT if the item does not exist (treated as a
 * logic error or corruption and reported via btrfs_handle_fs_error()), or
 * a negative errno from the search/delete.
 */
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

2875
/*
 * Remove the entry for @chunk_offset from the super block's in-memory
 * sys_chunk_array (the backup copy of system chunks), compacting the array
 * in place. Runs under chunk_mutex.
 *
 * Returns 0 on success, or -EIO if a non-CHUNK_ITEM key is found in the
 * array (which indicates corruption).
 */
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	/* Entries are a disk_key followed by a variable-size chunk item. */
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			/* Anything else in the array is corruption. */
			ret = -EIO;
			break;
		}
		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			/* Found it: shift the tail down and shrink the array. */
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}

2922 2923 2924 2925 2926 2927 2928 2929 2930
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
{
	struct extent_map_tree *tree = &fs_info->mapping_tree;
	struct extent_map *em;

	/* Lookup takes only the read side of the mapping tree lock. */
	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, logical, length);
	read_unlock(&tree->lock);

	/* No mapping at all covering this range. */
	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	/* Sanity check: the mapping must actually contain @logical. */
	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* callers are responsible for dropping em's ref. */
	return em;
}

2958
/*
 * Remove the chunk at @chunk_offset: free each stripe's device extent,
 * update the affected devices, delete the chunk item (and its
 * sys_chunk_array copy for system chunks) and finally remove the block
 * group. Any mid-way failure aborts the transaction.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(em);
	}
	map = em->map_lookup;
	/* Make sure there is room in the system space before deleting. */
	mutex_lock(&fs_info->chunk_mutex);
	check_system_chunk(trans, map->type);
	mutex_unlock(&fs_info->chunk_mutex);

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		/* Return the freed extent's bytes to the device accounting. */
		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}

		ret = btrfs_update_device(trans, device);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	ret = btrfs_free_chunk(trans, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);

	/* System chunks are also mirrored in the super's sys_chunk_array. */
	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}
Y
Yan Zheng 已提交
3044

3045
/*
 * Relocate all data out of the chunk at @chunk_offset and then delete the
 * chunk itself (device extents, chunk tree entry, block group).
 *
 * Returns 0 on success or a negative errno; -ENOSPC from the relocation is
 * propagated to the caller, which may retry.
 */
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret)
		return ret;

	/*
	 * The transaction is started only after relocation succeeded; the
	 * helper below sizes the reservation for block group removal.
	 */
	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}

3089
/*
 * Relocate every SYSTEM chunk in the filesystem, walking the chunk tree
 * from the highest key downwards.  Chunks that fail with -ENOSPC are
 * counted and the whole scan is retried once (relocating other chunks may
 * have freed space); a second round of failures yields -ENOSPC.
 */
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		/*
		 * Taken before the tree search and held across
		 * btrfs_relocate_chunk() so the cleaner kthread cannot
		 * delete the block group underneath us (see the comment in
		 * btrfs_relocate_chunk()).
		 */
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		/* No previous item (or error): nothing left to relocate */
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		/* Release the path before relocating; the search restarts anyway */
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		/* One retry: earlier relocations may have freed space */
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192
/*
 * return 1 : allocate a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 *
 * Called before relocating a data chunk: if it is the only data chunk
 * (nothing allocated in the data space-info), force-allocate an empty data
 * chunk first so the data raid profile is not lost by the relocation.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group_cache *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		if (!bytes_used) {
			struct btrfs_trans_handle *trans;
			int ret;

			/* join any running transaction rather than starting one */
			trans =	btrfs_join_transaction(fs_info->tree_root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = btrfs_force_chunk_alloc(trans,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans);
			if (ret < 0)
				return ret;
			return 1;
		}
	}
	return 0;
}

3204
/*
 * Persist the balance control @bctl as a balance item in the tree root so
 * an interrupted balance can be resumed after mount.  The item is written
 * and committed in its own transaction.
 */
static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	/* zero the on-disk item before filling in the individual fields */
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	/* convert each in-memory args struct to its disk (little-endian) form */
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	/* commit even on insert failure; report the first error seen */
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

3258
/*
 * Delete the persistent balance item from the tree root, committing the
 * deletion in its own transaction.  Returns -ENOENT if no item exists.
 */
static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	/* commit regardless; report the first error seen */
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

I
Ilya Dryomov 已提交
3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;	/* skip chunks more than 90% full */
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

3340 3341 3342 3343
/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 *
 * The in-memory control is detached (under balance_lock) and freed before
 * the on-disk item is removed; a failure to delete the item is reported via
 * btrfs_handle_fs_error() rather than returned.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}

I
Ilya Dryomov 已提交
3360 3361 3362 3363
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
3364
static int chunk_profiles_filter(u64 chunk_type,
I
Ilya Dryomov 已提交
3365 3366
				 struct btrfs_balance_args *bargs)
{
3367 3368
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;
I
Ilya Dryomov 已提交
3369

3370
	if (bargs->profiles & chunk_type)
I
Ilya Dryomov 已提交
3371 3372 3373 3374 3375
		return 0;

	return 1;
}

3376
/*
 * Usage-range filter: keep (return 0) chunks whose used bytes fall within
 * [usage_min%, usage_max%) of the chunk size; otherwise filter out (return 1).
 */
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
					bargs->usage_min);

	/* usage_max == 0 means "only completely empty chunks" (used < 1) */
	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
					bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	/* drop the reference taken by btrfs_lookup_block_group() */
	btrfs_put_block_group(cache);
	return ret;
}

3409
/*
 * Single-value usage filter: keep (return 0) chunks that are less than
 * usage% full; otherwise filter out (return 1).
 */
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	/*
	 * NOTE(review): the zero check reads usage_min while the other
	 * branches read usage — these share storage in the balance args, but
	 * the asymmetry looks intentional only for the union layout; confirm
	 * against btrfs_balance_args.
	 */
	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	/* drop the reference taken by btrfs_lookup_block_group() */
	btrfs_put_block_group(cache);
	return ret;
}

I
Ilya Dryomov 已提交
3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450
/*
 * devid filter: keep (return 0) chunks that have at least one stripe on the
 * requested device; filter out (return 1) all others.
 */
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	const int nr_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int idx;

	for (idx = 0; idx < nr_stripes; idx++) {
		struct btrfs_stripe *stripe = btrfs_stripe_nr(chunk, idx);

		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462
/*
 * Number of data-carrying stripes for a chunk of the given block group
 * @type with @num_stripes total stripes.
 */
static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const struct btrfs_raid_attr *attr =
		&btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];

	/* parity profiles: everything except the parity stripes holds data */
	if (attr->nparity)
		return num_stripes - attr->nparity;
	/* mirrored/striped profiles: each copy duplicates the data */
	return num_stripes / attr->ncopies;
}

I
Ilya Dryomov 已提交
3463 3464 3465 3466 3467 3468 3469 3470 3471
/* [pstart, pend) */
/*
 * Device-range filter: keep (return 0) chunks that have a stripe on the
 * requested device overlapping the physical range [pstart, pend).
 * Requires the devid filter to be set; without it, nothing is filtered.
 */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	u64 type;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	type = btrfs_chunk_type(leaf, chunk);
	factor = calc_data_stripes(type, num_stripes);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		/* per-device stripe length = chunk length / data stripes */
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512
/* [vstart, vend) */
/*
 * Virtual-range filter: keep (return 0) chunks whose logical byte range
 * overlaps [vstart, vend); filter out (return 1) all others.
 */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	const u64 chunk_end = chunk_offset + btrfs_chunk_length(leaf, chunk);

	if (chunk_offset < bargs->vend && chunk_end > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525
/*
 * Stripe-count filter: keep (return 0) chunks whose stripe count lies in
 * the inclusive range [stripes_min, stripes_max].
 */
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	const int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (num_stripes < bargs->stripes_min ||
	    num_stripes > bargs->stripes_max)
		return 1;

	return 0;
}

3526
/*
 * Soft-convert filter: when converting profiles in soft mode, filter out
 * (return 1) chunks that already have the target profile.
 */
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	u64 extended_profile;

	/* only meaningful when a conversion target is set */
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	extended_profile = chunk_to_extended(chunk_type) &
			   BTRFS_EXTENDED_PROFILE_MASK;

	return bargs->target == extended_profile ? 1 : 0;
}

3541
/*
 * Decide whether the chunk at @chunk_offset should be balanced, applying
 * the configured balance filters in order.  Returns 1 to balance the chunk,
 * 0 to skip it.  Note: the limit filters mutate bargs (they decrement the
 * remaining-count), so this must be called exactly once per candidate chunk.
 */
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	/* pick the per-type argument set (data/sys/meta) */
	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

3630
/*
 * Core of the balance operation.  Walks the chunk tree twice: a first
 * "counting" pass tallies the chunks matching the filters (for statistics
 * and the limit_min logic), then a second pass actually relocates them.
 * Honors pause/cancel requests between chunks and tolerates per-chunk
 * -ENOSPC (counted) and -ETXTBSY (active swapfile, skipped).
 */
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * Save the configured limits: the limit filters decrement them in
	 * place during the counting pass, so they must be restored before
	 * the second (relocating) pass.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/* restore the limits consumed by the counting pass */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		/* pause only interrupts the relocating pass; cancel stops both */
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		/*
		 * Held across the search and (for relocated chunks) the
		 * relocation, to keep the cleaner kthread from removing the
		 * block group underneath us.
		 */
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(leaf, chunk, found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			/* first pass: only count matching chunks per type */
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so lets allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
	   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	/* counting pass done: restart the walk and actually relocate */
	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838
/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	const u64 allowed_mask = extended ? BTRFS_EXTENDED_PROFILE_MASK :
					    BTRFS_BLOCK_GROUP_PROFILE_MASK;

	/* ignore the type bits, only the profile bits are checked */
	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) no bits outside the chosen profile mask may be set */
	if (flags & ~allowed_mask)
		return 0;

	/* 2) a reduced profile has exactly one bit set; "0" is only
	 *    acceptable in the usual (non-extended) representation */
	if (!flags)
		return !extended;

	return is_power_of_2(flags);
}

3842 3843
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
3844 3845 3846 3847
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
3848 3849
}

3850 3851 3852 3853 3854 3855 3856 3857 3858
/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	/* no conversion requested: nothing to validate */
	if (!(bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	/* invalid if the target is not a reduced profile or not allowed */
	return !alloc_profile_is_valid(bctl_arg->target, 1) ||
	       (bctl_arg->target & ~allowed);
}

3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902
/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				 u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;
	char *bp = buf;
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

	/*
	 * Each CHECK_APPEND_* snprintf()s into the remaining space and bails
	 * out to out_overflow on error or truncation.
	 */
#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
		CHECK_APPEND_1ARG("convert=%s,",
				  btrfs_bg_type_to_raid_name(bargs->target));

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
	else
		buf[0] = '\0';
}

/*
 * Log a "balance: start ..." or "balance: resume ..." message describing
 * the per-type filter arguments (-d/-m/-s) of the current balance.
 */
static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

	/* append into the remaining space; jump to out_overflow on truncation */
#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}

4013
/*
4014
 * Should be called with balance mutexe held
4015
 */
4016 4017
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
4018 4019
		  struct btrfs_ioctl_balance_args *bargs)
{
4020
	u64 meta_target, data_target;
4021
	u64 allowed;
4022
	int mixed = 0;
4023
	int ret;
4024
	u64 num_devices;
4025
	unsigned seq;
4026
	bool reducing_integrity;
4027
	int i;
4028

4029
	if (btrfs_fs_closing(fs_info) ||
4030 4031
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
4032 4033 4034 4035
		ret = -EINVAL;
		goto out;
	}

4036 4037 4038 4039
	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

4040 4041 4042 4043
	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
4044 4045
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
4046 4047 4048
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
J
Jeff Mahoney 已提交
4049
			btrfs_err(fs_info,
4050
	  "balance: mixed groups data and metadata options must be the same");
4051 4052 4053 4054 4055
			ret = -EINVAL;
			goto out;
		}
	}

4056
	num_devices = btrfs_num_devices(fs_info);
4057 4058 4059 4060
	allowed = 0;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
		if (num_devices >= btrfs_raid_array[i].devs_min)
			allowed |= btrfs_raid_array[i].bg_flag;
4061

4062
	if (validate_convert_profile(&bctl->data, allowed)) {
J
Jeff Mahoney 已提交
4063
		btrfs_err(fs_info,
4064
			  "balance: invalid convert data profile %s",
4065
			  btrfs_bg_type_to_raid_name(bctl->data.target));
4066 4067 4068
		ret = -EINVAL;
		goto out;
	}
4069
	if (validate_convert_profile(&bctl->meta, allowed)) {
4070
		btrfs_err(fs_info,
4071
			  "balance: invalid convert metadata profile %s",
4072
			  btrfs_bg_type_to_raid_name(bctl->meta.target));
4073 4074 4075
		ret = -EINVAL;
		goto out;
	}
4076
	if (validate_convert_profile(&bctl->sys, allowed)) {
4077
		btrfs_err(fs_info,
4078
			  "balance: invalid convert system profile %s",
4079
			  btrfs_bg_type_to_raid_name(bctl->sys.target));
4080 4081 4082 4083
		ret = -EINVAL;
		goto out;
	}

4084 4085 4086 4087 4088 4089 4090 4091 4092 4093
	/*
	 * Allow to reduce metadata or system integrity only if force set for
	 * profiles with redundancy (copies, parity)
	 */
	allowed = 0;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
		if (btrfs_raid_array[i].ncopies >= 2 ||
		    btrfs_raid_array[i].tolerated_failures >= 1)
			allowed |= btrfs_raid_array[i].bg_flag;
	}
4094 4095 4096 4097 4098 4099 4100 4101
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4102 4103 4104 4105 4106 4107 4108 4109 4110 4111
		     !(bctl->meta.target & allowed)))
			reducing_integrity = true;
		else
			reducing_integrity = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
4112
	} while (read_seqretry(&fs_info->profiles_lock, seq));
4113

4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125
	if (reducing_integrity) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
				   "balance: force reducing metadata integrity");
		} else {
			btrfs_err(fs_info,
	  "balance: reduces metadata integrity, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

4126 4127
	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4128
		btrfs_warn(fs_info,
4129
	"balance: metadata profile %s has lower redundancy than data profile %s",
4130 4131
				btrfs_bg_type_to_raid_name(meta_target),
				btrfs_bg_type_to_raid_name(data_target));
4132 4133
	}

4134 4135 4136 4137 4138 4139 4140 4141
	if (fs_info->send_in_progress) {
		btrfs_warn_rl(fs_info,
"cannot run balance while send operations are in progress (%d in progress)",
			      fs_info->send_in_progress);
		ret = -EAGAIN;
		goto out;
	}

4142
	ret = insert_balance_item(fs_info, bctl);
I
Ilya Dryomov 已提交
4143
	if (ret && ret != -EEXIST)
4144 4145
		goto out;

I
Ilya Dryomov 已提交
4146 4147
	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
4148 4149 4150 4151
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
I
Ilya Dryomov 已提交
4152 4153 4154 4155 4156 4157
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}
4158

4159 4160
	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4161
	describe_balance_start_or_resume(fs_info);
4162 4163 4164 4165 4166
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
4167 4168 4169 4170 4171 4172 4173
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

4174
	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4175 4176 4177

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
4178
		btrfs_update_ioctl_balance_args(fs_info, bargs);
4179 4180
	}

4181 4182
	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
4183
		reset_balance_state(fs_info);
4184
		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4185 4186
	}

4187
	wake_up(&fs_info->balance_wait_q);
4188 4189 4190

	return ret;
out:
I
Ilya Dryomov 已提交
4191
	if (bctl->flags & BTRFS_BALANCE_RESUME)
4192
		reset_balance_state(fs_info);
4193
	else
I
Ilya Dryomov 已提交
4194
		kfree(bctl);
4195 4196
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);

I
Ilya Dryomov 已提交
4197 4198 4199 4200 4201
	return ret;
}

static int balance_kthread(void *data)
{
4202
	struct btrfs_fs_info *fs_info = data;
4203
	int ret = 0;
I
Ilya Dryomov 已提交
4204 4205

	mutex_lock(&fs_info->balance_mutex);
4206
	if (fs_info->balance_ctl)
4207
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
I
Ilya Dryomov 已提交
4208
	mutex_unlock(&fs_info->balance_mutex);
4209

I
Ilya Dryomov 已提交
4210 4211 4212
	return ret;
}

4213 4214 4215 4216
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

4217
	mutex_lock(&fs_info->balance_mutex);
4218
	if (!fs_info->balance_ctl) {
4219
		mutex_unlock(&fs_info->balance_mutex);
4220 4221
		return 0;
	}
4222
	mutex_unlock(&fs_info->balance_mutex);
4223

4224
	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4225
		btrfs_info(fs_info, "balance: resume skipped");
4226 4227 4228
		return 0;
	}

4229 4230 4231 4232 4233 4234 4235 4236 4237
	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

4238
	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4239
	return PTR_ERR_OR_ZERO(tsk);
4240 4241
}

4242
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
I
Ilya Dryomov 已提交
4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
4257
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
I
Ilya Dryomov 已提交
4258 4259
	key.offset = 0;

4260
	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
I
Ilya Dryomov 已提交
4261
	if (ret < 0)
4262
		goto out;
I
Ilya Dryomov 已提交
4263 4264
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
4265 4266 4267 4268 4269 4270 4271
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
I
Ilya Dryomov 已提交
4272 4273 4274 4275 4276
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

4277 4278
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;
I
Ilya Dryomov 已提交
4279 4280 4281 4282 4283 4284 4285 4286

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298
	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		btrfs_warn(fs_info,
4299
	"balance: cannot set exclusive op status, resume manually");
4300

4301
	mutex_lock(&fs_info->balance_mutex);
4302 4303 4304 4305
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
4306
	mutex_unlock(&fs_info->balance_mutex);
I
Ilya Dryomov 已提交
4307 4308
out:
	btrfs_free_path(path);
4309 4310 4311
	return ret;
}

4312 4313 4314 4315 4316 4317 4318 4319 4320 4321
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

4322
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4323 4324 4325 4326
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
4327
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4328 4329 4330

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
4331
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4332 4333 4334 4335 4336 4337 4338 4339 4340
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

4341 4342 4343 4344 4345 4346 4347 4348
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

4349 4350 4351 4352 4353 4354 4355 4356 4357 4358
	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

4359 4360 4361 4362 4363
	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
4364
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4365 4366
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
4367
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4368 4369 4370
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
4371 4372 4373 4374
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
4375 4376
		mutex_lock(&fs_info->balance_mutex);

4377
		if (fs_info->balance_ctl) {
4378
			reset_balance_state(fs_info);
4379
			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4380
			btrfs_info(fs_info, "balance: canceled");
4381
		}
4382 4383
	}

4384 4385
	BUG_ON(fs_info->balance_ctl ||
		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4386 4387 4388 4389 4390
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

S
Stefan Behrens 已提交
4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
4402
	struct btrfs_trans_handle *trans = NULL;
S
Stefan Behrens 已提交
4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
4415 4416
		ret = btrfs_search_forward(root, &key, path,
				BTRFS_OLDEST_GENERATION);
S
Stefan Behrens 已提交
4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;
4440 4441 4442 4443 4444 4445 4446

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
S
Stefan Behrens 已提交
4447 4448 4449 4450 4451 4452 4453 4454 4455
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
4456 4457 4458 4459 4460 4461
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4462
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
S
Stefan Behrens 已提交
4463 4464 4465
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
4466
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
S
Stefan Behrens 已提交
4467 4468 4469 4470 4471 4472
					ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4473
			ret = btrfs_uuid_tree_add(trans,
S
Stefan Behrens 已提交
4474 4475 4476 4477
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
4478
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
S
Stefan Behrens 已提交
4479 4480 4481 4482 4483
					ret);
				break;
			}
		}

4484
skip:
S
Stefan Behrens 已提交
4485
		if (trans) {
4486
			ret = btrfs_end_transaction(trans);
4487
			trans = NULL;
S
Stefan Behrens 已提交
4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
4510
	if (trans && !IS_ERR(trans))
4511
		btrfs_end_transaction(trans);
S
Stefan Behrens 已提交
4512
	if (ret)
4513
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4514
	else
4515
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
S
Stefan Behrens 已提交
4516 4517 4518 4519
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

4520 4521 4522 4523
/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
4524
 * < 0	if an error occurred.
4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
4577
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4578 4579 4580 4581 4582 4583
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}

4584 4585 4586 4587 4588
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
S
Stefan Behrens 已提交
4589 4590
	struct task_struct *task;
	int ret;
4591 4592 4593 4594 4595 4596 4597 4598 4599

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

4600
	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4601
	if (IS_ERR(uuid_root)) {
4602
		ret = PTR_ERR(uuid_root);
4603
		btrfs_abort_transaction(trans, ret);
4604
		btrfs_end_transaction(trans);
4605
		return ret;
4606 4607 4608 4609
	}

	fs_info->uuid_root = uuid_root;

4610
	ret = btrfs_commit_transaction(trans);
S
Stefan Behrens 已提交
4611 4612 4613 4614 4615 4616
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
4617
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
4618
		btrfs_warn(fs_info, "failed to start uuid_scan task");
S
Stefan Behrens 已提交
4619 4620 4621 4622 4623
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
4624
}
S
Stefan Behrens 已提交
4625

4626 4627 4628 4629 4630 4631 4632 4633
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
4634
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4635 4636 4637 4638 4639 4640 4641
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

4642 4643 4644 4645 4646 4647 4648
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
4649 4650
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
4651 4652 4653 4654 4655 4656 4657
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
4658 4659
	int failed = 0;
	bool retried = false;
4660 4661
	struct extent_buffer *l;
	struct btrfs_key key;
4662
	struct btrfs_super_block *super_copy = fs_info->super_copy;
4663
	u64 old_total = btrfs_super_total_bytes(super_copy);
4664
	u64 old_size = btrfs_device_get_total_bytes(device);
4665
	u64 diff;
4666
	u64 start;
4667 4668

	new_size = round_down(new_size, fs_info->sectorsize);
4669
	start = new_size;
4670
	diff = round_down(old_size - new_size, fs_info->sectorsize);
4671

4672
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4673 4674
		return -EINVAL;

4675 4676 4677 4678
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

4679
	path->reada = READA_BACK;
4680

4681 4682 4683 4684 4685 4686
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

4687
	mutex_lock(&fs_info->chunk_mutex);
4688

4689
	btrfs_device_set_total_bytes(device, new_size);
4690
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
Y
Yan Zheng 已提交
4691
		device->fs_devices->total_rw_bytes -= diff;
4692
		atomic64_sub(diff, &fs_info->free_chunk_space);
4693
	}
4694 4695 4696 4697 4698 4699

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
4700
	if (contains_pending_extent(device, &start, diff)) {
4701 4702 4703 4704 4705 4706 4707 4708
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}
4709

4710
again:
4711 4712 4713 4714
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

4715
	do {
4716
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
4717
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4718
		if (ret < 0) {
4719
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4720
			goto done;
4721
		}
4722 4723

		ret = btrfs_previous_item(root, path, 0, key.type);
4724
		if (ret)
4725
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4726 4727 4728 4729
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
4730
			btrfs_release_path(path);
4731
			break;
4732 4733 4734 4735 4736 4737
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

4738
		if (key.objectid != device->devid) {
4739
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4740
			btrfs_release_path(path);
4741
			break;
4742
		}
4743 4744 4745 4746

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

4747
		if (key.offset + length <= new_size) {
4748
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4749
			btrfs_release_path(path);
4750
			break;
4751
		}
4752 4753

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4754
		btrfs_release_path(path);
4755

4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767
		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so lets allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

4768 4769
		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4770
		if (ret == -ENOSPC) {
4771
			failed++;
4772 4773 4774 4775 4776 4777 4778 4779
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
4780
	} while (key.offset-- > 0);
4781 4782 4783 4784 4785 4786 4787 4788

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
4789 4790
	}

4791
	/* Shrinking succeeded, else we would be at "done". */
4792
	trans = btrfs_start_transaction(root, 0);
4793 4794 4795 4796 4797
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

4798
	mutex_lock(&fs_info->chunk_mutex);
4799
	btrfs_device_set_disk_total_bytes(device, new_size);
4800 4801 4802
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
4803 4804

	WARN_ON(diff > old_total);
4805 4806
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
4807
	mutex_unlock(&fs_info->chunk_mutex);
M
Miao Xie 已提交
4808 4809 4810

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
4811 4812 4813 4814 4815 4816
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
4817 4818
done:
	btrfs_free_path(path);
4819
	if (ret) {
4820
		mutex_lock(&fs_info->chunk_mutex);
4821
		btrfs_device_set_total_bytes(device, old_size);
4822
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4823
			device->fs_devices->total_rw_bytes += diff;
4824
		atomic64_add(diff, &fs_info->free_chunk_space);
4825
		mutex_unlock(&fs_info->chunk_mutex);
4826
	}
4827 4828 4829
	return ret;
}

4830
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4831 4832 4833
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
4834
	struct btrfs_super_block *super_copy = fs_info->super_copy;
4835 4836 4837 4838
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

4839
	mutex_lock(&fs_info->chunk_mutex);
4840
	array_size = btrfs_super_sys_array_size(super_copy);
4841
	if (array_size + item_size + sizeof(disk_key)
4842
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4843
		mutex_unlock(&fs_info->chunk_mutex);
4844
		return -EFBIG;
4845
	}
4846 4847 4848 4849 4850 4851 4852 4853

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4854
	mutex_unlock(&fs_info->chunk_mutex);
4855

4856 4857 4858
	return 0;
}

4859 4860 4861 4862
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
4863
{
4864 4865
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;
4866

4867
	if (di_a->max_avail > di_b->max_avail)
4868
		return -1;
4869
	if (di_a->max_avail < di_b->max_avail)
4870
		return 1;
4871 4872 4873 4874 4875
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
4876
}
4877

D
David Woodhouse 已提交
4878 4879
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
4880
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
D
David Woodhouse 已提交
4881 4882
		return;

4883
	btrfs_set_fs_incompat(info, RAID56);
D
David Woodhouse 已提交
4884 4885
}

4886
/*
 * First phase of chunk allocation.  Create a new chunk of the given @type at
 * logical address @start:
 *
 *   1) collect the free-space holes of all writable, non-replace-target
 *      devices and sort them by size,
 *   2) derive stripe count/size from the raid profile limits in
 *      btrfs_raid_array, capped by the per-profile max chunk size,
 *   3) build the map_lookup, insert it into the mapping tree and create the
 *      matching block group.
 *
 * Nothing here touches the chunk tree; that is done later by
 * btrfs_finish_chunk_alloc().
 *
 * Returns 0 on success, -ENOSPC when not enough devices have free space,
 * -ENOMEM on allocation failure, or another negative errno from the
 * helpers it calls.
 */
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device *device;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data there are */
	int nparity;		/* number of stripes worth of bytes to
				   store parity information */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	/* fetch the per-profile allocation constraints */
	index = btrfs_bg_flags_to_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	if (!devs_max)
		devs_max = BTRFS_MAX_DEVS(info);
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;
	nparity = btrfs_raid_array[index].nparity;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
		       type);
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 max_avail;
		u64 dev_offset;

		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		/* skip devices not yet in metadata and replace targets */
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		/* ret == 0 means the full requested size is free */
		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%u",
					    __func__, device->devid, max_avail,
					    BTRFS_STRIPE_LEN * dev_stripes);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs = round_down(ndevs, devs_increment);

	if (ndevs < devs_min) {
		ret = -ENOSPC;
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ndevs, devs_min);
		}
		goto error;
	}

	ndevs = min(ndevs, devs_max);

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = (num_stripes - nparity) / ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size. If it's higher,
	 * we try to reduce stripe_size.
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		stripe_size = min(round_up(div_u64(max_chunk_size,
						   data_stripes), SZ_16M),
				  stripe_size);
	}

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	/* lay the dev_stripes of each device out contiguously on that device */
	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	chunk_size = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info, map, start, chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &info->mapping_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		goto error;
	}
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
	if (ret)
		goto error_del_extent;

	/* account the new per-device usage; commit happens later */
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev, dev->bytes_used + stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

5166
/*
 * Second phase of chunk allocation: record the chunk at @chunk_offset
 * (spanning @chunk_size bytes) in the trees.  This updates each stripe's
 * device item, allocates the per-device extents, and inserts the chunk item
 * into the chunk tree; SYSTEM chunks are additionally mirrored into the
 * superblock's sys_chunk_array.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	/* fill in the stripe array of the chunk item, still under the mutex */
	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}
5263

Y
Yan Zheng 已提交
5264
/*
5265 5266 5267 5268
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
Y
Yan Zheng 已提交
5269 5270
 * bootstrap process of adding storage to a seed btrfs.
 */
5271
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
Y
Yan Zheng 已提交
5272 5273 5274
{
	u64 chunk_offset;

5275 5276
	lockdep_assert_held(&trans->fs_info->chunk_mutex);
	chunk_offset = find_next_chunk(trans->fs_info);
5277
	return __btrfs_alloc_chunk(trans, chunk_offset, type);
Y
Yan Zheng 已提交
5278 5279
}

5280
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
Y
Yan Zheng 已提交
5281
{
5282
	struct btrfs_fs_info *fs_info = trans->fs_info;
Y
Yan Zheng 已提交
5283 5284 5285 5286 5287
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	int ret;

5288
	chunk_offset = find_next_chunk(fs_info);
5289
	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5290
	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
5291 5292
	if (ret)
		return ret;
Y
Yan Zheng 已提交
5293

5294
	sys_chunk_offset = find_next_chunk(fs_info);
5295
	alloc_profile = btrfs_system_alloc_profile(fs_info);
5296
	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
5297
	return ret;
Y
Yan Zheng 已提交
5298 5299
}

5300 5301
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
5302
	const int index = btrfs_bg_flags_to_raid_index(map->type);
Y
Yan Zheng 已提交
5303

5304
	return btrfs_raid_array[index].tolerated_failures;
Y
Yan Zheng 已提交
5305 5306
}

5307
/*
 * Decide whether the chunk at @chunk_offset has to be treated read-only.
 *
 * Returns 1 when any present stripe device is not writeable, when more
 * devices are missing than the chunk's raid profile tolerates, or when the
 * chunk map cannot be looked up; returns 0 otherwise.
 */
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		/* missing devices are counted, not an immediate failure */
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

5345
void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5346 5347 5348
{
	struct extent_map *em;

C
Chris Mason 已提交
5349
	while (1) {
5350 5351
		write_lock(&tree->lock);
		em = lookup_extent_mapping(tree, 0, (u64)-1);
5352
		if (em)
5353 5354
			remove_extent_mapping(tree, em);
		write_unlock(&tree->lock);
5355 5356 5357 5358 5359 5360 5361 5362 5363
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

5364
/*
 * Number of copies that can be tried for the block at @logical, based on the
 * chunk's raid profile: all stripes for DUP/RAID1-like profiles, sub_stripes
 * for RAID10, 2 for RAID5, all stripes for RAID6 retries, and 1 otherwise.
 * One extra copy is added while a device replace target device exists.
 */
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	/* the replace target holds a full extra copy while it is being built */
	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}

5409
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
D
David Woodhouse 已提交
5410 5411 5412 5413
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
5414
	unsigned long len = fs_info->sectorsize;
D
David Woodhouse 已提交
5415

5416
	em = btrfs_get_chunk_map(fs_info, logical, len);
D
David Woodhouse 已提交
5417

5418 5419 5420 5421 5422 5423
	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
D
David Woodhouse 已提交
5424 5425 5426
	return len;
}

5427
/*
 * Return 1 when the chunk containing @logical uses a RAID5/6 profile (i.e.
 * redundancy comes from parity rather than plain mirroring), 0 otherwise.
 * A failed chunk lookup triggers a WARN and returns 0.
 */
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	int ret = 0;
	struct extent_map *em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		struct map_lookup *map = em->map_lookup;

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

5444
/*
 * Pick a live mirror to read from among the stripes
 * [first, first + num_stripes) of a RAID1-like or RAID10 chunk.
 *
 * A pid-derived preferred mirror spreads concurrent readers across the
 * copies.  While a device replace runs in "avoid source" mode, the source
 * drive is only used if no other mirror has a usable bdev.
 */
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		/* second pass (tolerance == 1) accepts the srcdev too */
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}

D
David Woodhouse 已提交
5493 5494 5495 5496 5497 5498
/* Order helper for sort_parity_stripes(): non-zero when a sorts after b. */
static inline int parity_smaller(u64 a, u64 b)
{
	return (a > b) ? 1 : 0;
}

/*
 * Bubble-sort the stripe set to put the parity/syndrome stripes last.
 * bbio->stripes and bbio->raid_map are parallel arrays and are swapped in
 * lockstep; sorting by rising raid_map value pushes the parity stripes
 * (which carry the largest raid_map markers) to the end.
 */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}

5524 5525 5526
/*
 * Allocate a btrfs_bio with trailing storage for @total_stripes stripe
 * entries, @real_stripes target-device indexes, and a raid_map of
 * @total_stripes u64 values.  Uses __GFP_NOFAIL, so the returned pointer is
 * never NULL; the caller owns the initial reference.
 */
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	return bbio;
}

/* Take an extra reference on @bbio; its refcount must already be non-zero. */
void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

/*
 * Drop a reference on @bbio and free it when the last reference goes away.
 * Passing NULL is a no-op.
 */
void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}

5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591
/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Map a discard of (@logical, @length) to the physical stripe ranges it
 * covers and return them in a freshly allocated *@bbio_ret.
 *
 * Please note that, discard won't be sent to target device of device
 * replace.  RAID5/6 chunks are rejected with -EOPNOTSUPP.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* discard always return a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	/* clamp the discard to the end of the chunk */
	length = min_t(u64, em->len - offset, length);

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		/* mirrored profiles: discard every copy */
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}

5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788
/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first.  At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	/* a stripe on the source device must exist for blocks left of cursor */
	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882
/*
 * Adjust an already built bbio for a running device replace.
 *
 * For BTRFS_MAP_WRITE, every stripe that targets the replace source device
 * is duplicated onto the target device so both copies stay in sync.  For
 * BTRFS_MAP_GET_READ_MIRRORS, the target device is appended as one extra
 * mirror of the lowest-address source stripe.  The stripe count, max error
 * count and bbio->num_tgtdevs are updated accordingly.
 */
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}

5883 5884 5885 5886 5887
static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}

5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902
/*
 * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
 *		       tuple. This information is used to calculate how big a
 *		       particular bio can get before it straddles a stripe.
 *
 * @fs_info - the filesystem
 * @logical - address that we want to figure out the geometry of
 * @len	    - the length of IO we are going to perform, starting at @logical
 * @op      - type of operation - write or read
 * @io_geom - pointer used to return values
 *
 * Returns < 0 in case a chunk for the given logical address cannot be found,
 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	/* (u64)-1 == "not a RAID56 chunk"; only set for RAID5/6 below */
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;
	int ret = 0;

	/* Discards are mapped by __btrfs_map_block_for_discard() instead */
	ASSERT(op != BTRFS_MAP_DISCARD);

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		/* Can only happen on corruption: offset/stripe_nr math broke */
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			stripe_offset, offset, em->start, logical, stripe_len);
		ret = -EINVAL;
		goto out;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			/*
			 * NOTE(review): unsigned long is 32 bits on 32-bit
			 * arches while stripe_len is u64 — confirm the product
			 * cannot overflow there.
			 */
			unsigned long full_stripe_len = stripe_len * data_stripes;
			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
					full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
			 */
			if (op == BTRFS_MAP_WRITE) {
				max_len = stripe_len * data_stripes -
					  (offset - raid56_full_stripe_start);
			}
		}
		len = min_t(u64, em->len - offset, max_len);
	} else {
		/* Single-device profile: IO may span to the end of the chunk */
		len = em->len - offset;
	}

	io_geom->len = len;
	io_geom->offset = offset;
	io_geom->stripe_len = stripe_len;
	io_geom->stripe_nr = stripe_nr;
	io_geom->stripe_offset = stripe_offset;
	io_geom->raid56_stripe_offset = raid56_full_stripe_start;

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}

5988 5989
/*
 * Map a logical address onto the physical stripes of its chunk, producing a
 * btrfs_bio describing which devices/offsets an IO of *length bytes at
 * @logical must touch.  Handles all RAID profiles, the dev-replace target
 * device, and (optionally) building a raid_map for RAID5/6 parity rebuild.
 *
 * @mirror_num:    which copy to read (0 = pick one), or num_stripes + 1 to
 *                 read from the dev-replace target drive.
 * @need_raid_map: when nonzero and the chunk is RAID5/6, return the full
 *                 stripe with a logical-address map for the parity code.
 *
 * On success *bbio_ret is set (caller puts it) and *length may be shrunk to
 * the amount reachable without straddling a stripe.
 */
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int data_stripes;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;
	struct btrfs_io_geometry geom;

	ASSERT(bbio_ret);

	/* Discards have their own multi-range mapping helper */
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     *length, bbio_ret);

	ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
	if (ret < 0)
		return ret;

	/* Geometry lookup above already validated the chunk exists */
	em = btrfs_get_chunk_map(fs_info, logical, *length);
	ASSERT(em);
	map = em->map_lookup;

	*length = geom.len;
	stripe_len = geom.stripe_len;
	stripe_nr = geom.stripe_nr;
	stripe_offset = geom.stripe_offset;
	raid56_full_stripe_start = geom.raid56_stripe_offset;
	data_stripes = nr_data_stripes(map);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	/*
	 * mirror_num == num_stripes + 1 selects the replace target drive;
	 * resolve it to the source mirror plus the physical offset to patch
	 * into stripe 0 at the end.
	 */
	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		/* RAID0: data rotates over all stripes, single copy */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		/* RAID1*: writes go to all copies, reads pick one mirror */
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		/* DUP: both copies live on the same device */
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		/* RAID10: stripe over mirror groups of sub_stripes devices */
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * data_stripes);

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					data_stripes, &stripe_index);
			if (mirror_num > 1)
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	/* Reserve extra stripe slots for writes mirrored to the tgtdev */
	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	/* tgtdev_map lives right after the stripes array in the allocation */
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		/* raid_map is carved out of the same allocation as well */
		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		/*
		 * NOTE(review): map->num_stripes vs num_stripes mixed below;
		 * on this path they appear equal — confirm.
		 */
		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}


	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset +
			stripe_nr * map->stripe_len;
		bbio->stripes[i].dev =
			map->stripes[stripe_index].dev;
		stripe_index++;
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	/* Duplicate writes / extra read mirrors onto the replace target */
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}

6245
/*
 * Public wrapper of __btrfs_map_block() for regular IO: no raid_map is
 * built, so RAID5/6 chunks are mapped to a single stripe only.
 */
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

6253
/*
 * For Scrub/replace: like btrfs_map_block() but with mirror_num 0 and a
 * raid_map requested, so RAID5/6 callers get the whole stripe set.
 */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}

6261 6262
/*
 * Reverse-map a physical address within a chunk back to the logical
 * address(es) that reference it.
 *
 * @chunk_start: logical start of the chunk
 * @physical:    physical address to reverse-map
 * @logical:     out: kcalloc'd array of logical addresses (caller kfrees)
 * @naddrs:      out: number of entries in *logical
 * @stripe_len:  out: length covered by each returned logical address
 *
 * Returns 0 on success, -EIO if the chunk map cannot be found, or -ENOMEM.
 * (Previously the allocation failure was a BUG_ON(); a recoverable error
 * return is preferred over crashing the kernel on OOM.)
 */
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	length = em->len;
	rmap_len = map->stripe_len;

	/* Scale the chunk length down to the per-device data length */
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	if (!buf) {
		free_extent_map(em);
		return -ENOMEM;
	}

	for (i = 0; i < map->num_stripes; i++) {
		/* Skip stripes whose device range doesn't cover @physical */
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		/* Deduplicate: mirrored profiles map to the same logical */
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

6330
/*
 * Complete the original bio for a finished btrfs_bio: restore the caller's
 * private data and end_io that were saved in the bbio, signal completion,
 * then drop our bbio reference.
 */
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}

6339
/*
 * Per-stripe bio completion handler.  Records device error statistics,
 * and when the last outstanding stripe bio finishes (stripes_pending hits
 * zero) decides the overall status: OK if the error count stayed within
 * the profile's tolerance, IOERR otherwise.
 */
static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bbio->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				/* Attribute the failure to the right counter */
				if (bio_op(bio) == REQ_OP_WRITE)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else if (!(bio->bi_opf & REQ_RAHEAD))
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if (bio->bi_opf & REQ_PREFLUSH)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Last stripe done: complete via the original bio */
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		/* A clone that finished early; the original stays alive */
		bio_put(bio);
	}
}

6399 6400 6401 6402 6403 6404 6405
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 *
 * Reads are submitted synchronously; only writes are queued.  Sync writes
 * go on a separate list so they can be prioritized by the worker.
 */
static noinline void btrfs_schedule_bio(struct btrfs_device *device,
					struct bio *bio)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (bio_op(bio) == REQ_OP_READ) {
		btrfsic_submit_bio(bio);
		return;
	}

	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;

	spin_lock(&device->io_lock);
	if (op_is_sync(bio->bi_opf))
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	/* Append to the singly-linked pending list via bi_next */
	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	/* Worker already running; it will pick this bio up itself */
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(fs_info->submit_workers, &device->work);
}

6443 6444
/*
 * Point @bio at one stripe of @bbio (device + physical sector), hook up the
 * btrfs completion handler and submit it, either directly or via the
 * per-device async queue.
 */
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, int dev_nr, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	/* physical is in bytes; bi_sector is in 512-byte units */
	bio->bi_iter.bi_sector = physical >> 9;
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
		bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	/* Balanced by btrfs_bio_counter_dec() in btrfs_end_bio() */
	btrfs_bio_counter_inc_noblocked(fs_info);

	if (async)
		btrfs_schedule_bio(dev, bio);
	else
		btrfsic_submit_bio(bio);
}

/*
 * Account a stripe that could not be submitted at all (missing or
 * non-writeable device).  If this was the last outstanding stripe,
 * finish the original bio with the accumulated status.
 */
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* Restore the logical sector; no stripe remapped this bio */
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}
6485 6486
/*
 * Map @bio's logical range onto physical stripes and submit one (cloned)
 * bio per stripe.  RAID5/6 writes and degraded reads are handed off to the
 * raid56 code instead.  The fs-wide bio counter is held across the whole
 * operation to fence against dev-replace.
 *
 * @mirror_num:   copy to read (0 = any), forwarded to __btrfs_map_block()
 * @async_submit: queue writes on the per-device worker instead of
 *                submitting inline
 */
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;
	/* Save the caller's completion context; restored in btrfs_end_bbio() */
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			/* mirror_num > 1: rebuild from parity */
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		/* Callers must split bios at stripe boundaries beforehand */
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		/* Skip unusable devices but still account the stripe */
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		/* Last stripe reuses the original bio; others get clones */
		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
				  dev_nr, async_submit);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}

6561 6562 6563 6564 6565 6566 6567 6568 6569
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * If @seed is true, traverse through the seed devices.
 */
6570
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6571 6572
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
6573
{
Y
Yan Zheng 已提交
6574 6575
	struct btrfs_device *device;

6576
	while (fs_devices) {
Y
Yan Zheng 已提交
6577
		if (!fsid ||
6578
		    !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6579 6580 6581 6582 6583 6584 6585
			list_for_each_entry(device, &fs_devices->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
Y
Yan Zheng 已提交
6586
		}
6587 6588 6589 6590
		if (seed)
			fs_devices = fs_devices->seed;
		else
			return NULL;
Y
Yan Zheng 已提交
6591 6592
	}
	return NULL;
6593 6594
}

6595
/*
 * Create a placeholder btrfs_device for a devid referenced by metadata but
 * not present at mount, link it into @fs_devices and mark it MISSING.
 * Returns the new device or an ERR_PTR from the allocation.
 */
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}

6614 6615 6616 6617 6618 6619 6620 6621 6622 6623
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6624
 * on error.  Returned struct is not linked onto any lists and must be
6625
 * destroyed with btrfs_free_device.
6626 6627 6628 6629 6630 6631 6632 6633
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

6634
	if (WARN_ON(!devid && !fs_info))
6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
6648
			btrfs_free_device(dev);
6649 6650 6651 6652 6653 6654 6655 6656 6657 6658
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

6659 6660
	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);
6661 6662 6663 6664

	return dev;
}

6665
/*
 * Log a missing device, rate-limited.  @error selects err vs warn severity
 * (err when the mount cannot tolerate the absence, warn when degraded
 * operation continues).
 */
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695
/*
 * Compute the per-device stripe length of a chunk: the chunk length divided
 * by the number of stripes that carry data (parity stripes excluded for
 * RAID5/6, mirror copies folded in via ncopies otherwise).
 */
static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const u64 profile = type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
	int data_stripes;

	if (profile == BTRFS_BLOCK_GROUP_RAID5)
		data_stripes = num_stripes - 1;	/* one parity stripe */
	else if (profile == BTRFS_BLOCK_GROUP_RAID6)
		data_stripes = num_stripes - 2;	/* P and Q stripes */
	else
		data_stripes = num_stripes / ncopies;

	return div_u64(chunk_len, data_stripes);
}

6696
/*
 * Build an in-memory chunk mapping (extent_map + map_lookup) from an on-disk
 * chunk item and insert it into the fs-wide mapping tree.  Missing devices
 * are either stubbed in (mount -o degraded) or cause -ENOENT.
 *
 * Returns 0 on success (including when the chunk was already mapped),
 * negative errno on failure.
 */
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, logical, 1);
	read_unlock(&map_tree->lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	/* Copy the chunk geometry out of the leaf */
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	em->orig_block_len = calc_stripe_length(map->type, em->len,
						map->num_stripes);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL, true);
		/* Device absent and not mounting degraded: hard failure */
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			/* Degraded mount: stand in a MISSING placeholder */
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&(map->stripes[i].dev->dev_state));

	}

	write_lock(&map_tree->lock);
	ret = add_extent_mapping(map_tree, em, 0);
	write_unlock(&map_tree->lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	/* Mapping tree holds its own ref; drop ours */
	free_extent_map(em);

	return ret;
}

6809
/*
 * Populate an in-memory btrfs_device from an on-disk dev item.  The on-disk
 * sizes seed both the live and the commit (last-transaction) values.
 */
static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	/* Replace targets are never recorded as regular dev items */
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

6832
/*
 * Find or open the fs_devices for a seed filesystem with the given fsid.
 *
 * Looks first through the seed list already hanging off the mounted fs;
 * otherwise clones the scanned fs_devices, opens its devices read-only and
 * links the result at the head of the mounted fs' seed list.  Under the
 * DEGRADED mount option a missing seed fs gets a fresh, empty fs_devices
 * marked as seeding/opened instead of failing the mount.
 *
 * Returns the fs_devices on success, an ERR_PTR on failure.
 */
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	/* Already on this filesystem's seed list? */
	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		/* DEGRADED mount: fake an empty, already-opened seed fs */
		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	/* Clone so the scanned copy stays untouched */
	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	/* A seed fs must carry the seeding flag on disk */
	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	/* Link at the head of the mounted filesystem's seed list */
	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}

6887
/*
 * Process one DEV_ITEM read from the chunk tree: match it to an existing
 * btrfs_device (possibly on a seed filesystem), create a placeholder for a
 * missing device on DEGRADED mounts, and copy the on-disk fields into the
 * in-memory device.
 *
 * Returns 0 on success, -ENOENT for a missing device on a non-degraded
 * mount, -EINVAL on a seed generation mismatch, or the error from the
 * seed-open/add-missing helpers.
 */
static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	/* A foreign fsid means the device belongs to a seed filesystem */
	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid, true);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, true);
			return -ENOENT;
		}

		/* DEGRADED: create a placeholder for the absent device */
		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				"failed to add missing dev %llu: %ld",
				devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	/* Seed devices are read-only; their generation must match */
	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	/* Writable non-replace-target devices count toward rw/free space */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}

6985
/*
 * Read the system chunk array embedded in the superblock and create the
 * chunk mappings for all SYSTEM block groups.  This must happen before the
 * chunk tree itself can be read, since the chunk tree lives inside a
 * SYSTEM chunk.
 *
 * The superblock copy is written into an artificial extent buffer so the
 * regular extent-buffer accessors (btrfs_chunk_*) can be used on it.
 *
 * Returns 0 on success, -EIO on a malformed array, or the error from
 * read_one_chunk().
 */
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * set_extent_buffer_uptodate() call does not properly mark all it's
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * array_ptr walks the in-memory copy, sb_array_offset tracks the
	 * matching offset inside the extent buffer; both advance in lockstep.
	 */
	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				btrfs_err(fs_info,
					"invalid number of stripes %u in sys_array at offset %u",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			/* Only SYSTEM chunks may live in the sys array */
			type = btrfs_chunk_type(sb, chunk);
			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
				btrfs_err(fs_info,
			    "invalid chunk type %llu in sys_array at offset %u",
					type, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(&key, sb, chunk);
			if (ret)
				break;
		} else {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}

7105 7106 7107
/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	/* Walk all chunk mappings, one lookup per iteration */
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		/* Count stripes whose device is absent or unusable */
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}

7170
/*
 * Read all device and chunk items from the chunk tree and build the
 * in-memory device list and chunk mappings.  Afterwards cross-check the
 * device count and total size against the superblock.
 *
 * Returns 0 on success, a negative errno on failure.
 */
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
7263

7264 7265 7266 7267 7268
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

7269 7270 7271
	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
7272
			device->fs_info = fs_info;
7273 7274 7275 7276
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
7277 7278
}

7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291
/*
 * Load the persistent per-device error statistics from the device tree
 * into each btrfs_device.  A device without a stats item gets all-zero
 * counters.  Either way the device is marked dev_stats_valid afterwards.
 *
 * Returns 0 on success (search misses are not errors), negative errno on
 * allocation or tree-search failure.
 */
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			/* No stats item on disk: start from zeroed counters */
			for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
				btrfs_dev_stat_set(device, i, 0);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		/* Older items may carry fewer counters; missing ones are 0 */
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_set(device, i, 0);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

/*
 * Write the current in-memory error counters of @device into its
 * dev_stats item in the device tree, creating (or replacing a too-small)
 * item as needed.
 *
 * Returns 0 or 1 on success (per btrfs_search_slot semantics after the
 * insert paths), negative errno on failure.
 */
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		/* Fall through to the insert path below */
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	/* Copy all counters into the (found or freshly inserted) item */
	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 *
 * Only devices whose change counter (dev_stats_ccnt) is non-zero are
 * written; on success the counter is decremented by the snapshot taken
 * before the write, so changes racing in meanwhile are not lost.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;


		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barriers pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

7441 7442 7443 7444 7445 7446
/* Bump the error counter @index of @dev and log the updated counters. */
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

7447
/*
 * Log all error counters of a device after one of them was incremented,
 * via the rate-limited (_rl) printk helper.  Skipped until the on-disk
 * stats have been loaded (dev_stats_valid).
 */
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
7460

7461 7462
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
7463 7464 7465 7466 7467 7468 7469 7470
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

7471
	btrfs_info_in_rcu(dev->fs_info,
7472
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7473
	       rcu_str_deref(dev->name),
7474 7475 7476 7477 7478 7479 7480
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

7481
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7482
			struct btrfs_ioctl_get_dev_stats *stats)
7483 7484
{
	struct btrfs_device *dev;
7485
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7486 7487 7488
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
7489 7490
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
7491 7492 7493
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
7494
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7495
		return -ENODEV;
7496
	} else if (!dev->dev_stats_valid) {
7497
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7498
		return -ENODEV;
7499
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7500 7501 7502 7503 7504
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
7505
				btrfs_dev_stat_set(dev, i, 0);
7506 7507 7508 7509 7510 7511 7512 7513 7514 7515
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
7516

7517
/*
 * Wipe the btrfs magic from every superblock copy on @bdev so the device
 * is no longer detected as btrfs, then notify udev and touch the device
 * node timestamps so blkid re-probes it.
 *
 * NOTE(review): assumes the caller has made sure the device is no longer
 * in use by a mounted filesystem -- confirm at call sites.
 */
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		/* Copies beyond the device size simply don't exist */
		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		/* Clearing the magic is enough to invalidate the copy */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}
7546 7547

/*
 * Update the size and bytes used for each device where it changed.  This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	/* Only legal while the transaction is in the commit phase */
	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here.  This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		/* Promote the live values to the committed view */
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}
7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594

/* Attach @fs_info to the main fs_devices and every chained seed. */
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *cur;

	for (cur = fs_info->fs_devices; cur; cur = cur->seed)
		cur->fs_info = fs_info;
}

/* Detach fs_info from the main fs_devices and every chained seed. */
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *cur;

	for (cur = fs_info->fs_devices; cur; cur = cur->seed)
		cur->fs_info = NULL;
}
7595 7596 7597 7598 7599 7600

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	return btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)].ncopies;
}
7605 7606 7607 7608 7609 7610 7611



/*
 * Validate a single dev extent against the chunk mapping it claims to
 * belong to: the chunk must exist, the length must equal the chunk's
 * stripe length, exactly one matching stripe must exist, and the extent
 * must lie within the device (or its seed counterpart).
 *
 * On a stripe match map->verified_stripes is incremented so the caller
 * can later check full coverage per chunk.
 *
 * Returns 0 if consistent, -EUCLEAN on any mismatch.
 */
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/* Find the stripe this dev extent backs */
	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
					NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
7701
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7702 7703 7704 7705 7706
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
L
Liu Bo 已提交
7707
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/* Start at the first possible dev extent (devid 1, offset 0) */
	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}