/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
	},
};
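
/*
 * Reading the table above: for example, BTRFS_RAID_RAID10 consumes
 * devices in pairs (devs_increment = 2), keeps two copies of every block
 * (ncopies = 2), needs at least four devices (devs_min = 4) and is
 * expected to tolerate the loss of any single device
 * (tolerated_failures = 1).
 */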

const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};
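
/*
 * For example, shrinking a RAID10 filesystem below devs_min = 4 devices
 * fails with BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET; profiles such as RAID0,
 * DUP and SINGLE map to 0 here because no dedicated error code exists
 * for them.
 */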

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static void btrfs_close_one_device(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
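
/*
 * Typical use of the helper above (sketch only; one real caller in this
 * file is __btrfs_open_devices()):
 *
 *	struct block_device *bdev;
 *	struct buffer_head *bh;
 *
 *	if (!btrfs_get_bdev_and_sb(path, flags | FMODE_EXCL, holder, 1,
 *				   &bdev, &bh)) {
 *		struct btrfs_super_block *sb;
 *
 *		sb = (struct btrfs_super_block *)bh->b_data;
 *		... inspect sb ...
 *		brelse(bh);
 *		blkdev_put(bdev, flags | FMODE_EXCL);
 *	}
 */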

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
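
/*
 * In short: run_scheduled_bios() drains device->pending_bios and
 * device->pending_sync_bios under a blk_plug, preferring the sync list,
 * and when the backing device becomes congested it requeues what is
 * left with requeue_list() and reschedules itself via btrfs_queue_work()
 * so that a single worker thread never monopolizes a congested device.
 */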

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * Todo: This won't be enough. What if the same device
			 * comes back (with a new uuid) under its mapper path?
			 * But for now, this does help, as mostly an admin will
			 * either use a mapper or a non-mapper path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
						rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         a different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the
			 * one with the larger generation number or the
			 * last-in if the generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
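
/*
 * Freeing a device is a two-stage affair: call_rcu() waits for an RCU
 * grace period before free_device() runs, and free_device() then punts
 * the actual blkdev_put() and kfree() to a workqueue via __free_device(),
 * since blkdev_put() may sleep and RCU callbacks must not.
 */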

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
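
/*
 * btrfs_read_disk_super() deliberately goes through the page cache
 * (read_cache_page_gfp()) rather than buffer heads: the scan path below
 * may probe devices that are not mounted and therefore must not call
 * set_blocksize(). On success the caller holds a kmapped page and must
 * release it with btrfs_release_disk_super().
 */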

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
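
/*
 * Overlap accounting above, by example: for a query range [100, 199]
 * and a dev extent spanning [150, 250), only the intersecting 50 bytes
 * ([150, 199]) are added to *length; an extent covering the whole range
 * short-circuits with *length = end - start + 1.
 */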

static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}
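
/*
 * Example: if a pending chunk pins a stripe on this device at
 * [16M, 17M) and the caller proposes *start = 16M for a 1M long hole,
 * the loop above bumps *start to 17M and returns 1, telling the caller
 * to redo the hole search past the pinned extent.
 */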


/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}
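
/*
 * Note: find_free_dev_extent() always searches from offset 0 and, via
 * find_free_dev_extent_start(), never hands out space below
 * max(fs_info->alloc_start, 1MB), which keeps the superblock and the
 * boot-loader area free. On success *start/*len describe a hole of at
 * least num_bytes; on -ENOSPC they describe the largest hole found.
 */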

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root.
 * the btrfs_device struct should be fully filled in.
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}
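
/*
 * For example, on a two-device filesystem carrying RAID1 block groups
 * (devs_min = 2), a caller asking whether num_devices = 1 is acceptable
 * gets BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET back; btrfs_rm_device() below
 * relies on this to refuse removing the last mirror.
 */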

struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
					struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
			!next_device->missing && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;
	char *dev_name = NULL;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(root, devid, device_path,
				&device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		dev_name = kstrdup(device->name->str, GFP_KERNEL);
		if (!dev_name) {
			ret = -ENOMEM;
			goto error_undo;
		}
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	btrfs_assign_next_active_device(root->fs_info, device, NULL);

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	}

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		struct block_device *bdev;

		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
						root->fs_info->bdev_holder);
		if (!IS_ERR(bdev)) {
			btrfs_scratch_superblocks(bdev, dev_name);
			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
		}
	}

out:
	kfree(dev_name);

	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto out;
}

void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable)
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

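/*
 * Free the source device of a finished replace: wipe its superblocks if it
 * was writeable and, if it was the last member of a (seed) fs_devices,
 * tear that fs_devices down as well.
 */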
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (srcdev->writeable) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}
	call_rcu(&srcdev->rcu, free_device);

	/*
	 * unless fs_devices is a seed fs, num_devices shouldn't go
	 * to zero
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devs we'd rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

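/*
 * Tear down the target device of a device replace: unlink it from the
 * device list and sysfs, wipe its superblocks and free it.
 */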
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	mutex_lock(&uuid_mutex);
	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_info->fs_devices->open_devices--;

	fs_info->fs_devices->num_devices--;

	btrfs_assign_next_active_device(fs_info, tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of the device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
	call_rcu(&tgtdev->rcu, free_device);
}

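/*
 * Read the superblock of the device at @device_path and look the device up
 * by the devid and uuid stored in it.
 */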
static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

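/*
 * Resolve the special path "missing" to the first device that is present
 * in the metadata but has no block device attached; any other string is
 * treated as a device path.
 */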
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device)
			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
					 char *devpath,
					 struct btrfs_device **device)
{
	int ret;

	if (devid) {
		ret = 0;
		*device = btrfs_find_device(root->fs_info, devid, NULL,
					    NULL);
		if (!*device)
			ret = -ENOENT;
	} else {
		if (!devpath || !devpath[0])
			return -EINVAL;

		ret = btrfs_find_device_missing_or_by_path(root, devpath,
							   device);
	}
	return ret;
}

/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = __alloc_fs_devices();
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	lock_chunks(root);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	unlock_chunks(root);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

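/*
 * Add a new device to a mounted filesystem. If the filesystem is a seed,
 * sprout a new writable filesystem on top of it first.
 */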
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 tmp;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 0;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	lock_chunks(root);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    tmp + device->total_bytes);

	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    tmp + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		lock_chunks(root);
		ret = init_first_rw_device(trans, root, device);
		unlock_chunks(root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	ret = btrfs_add_device(trans, root, device);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto error_trans;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}

		/* Sprouting would change the fsid of the mounted root,
		 * so rename the fsid in sysfs
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
						root->fs_info->fsid);
		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
								fsid_buf))
			btrfs_warn(root->fs_info,
				"sysfs: failed to create fsid for sprout");
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_handle_fs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_trans:
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

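/*
 * Open and initialize the target device of a device replace. The target
 * must not already be a member of the filesystem and must be at least as
 * large as the source device.
 */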
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
				  struct btrfs_device *srcdev,
				  struct btrfs_device **device_out)
{
	struct request_queue *q;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *devices;
	struct rcu_string *name;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (fs_info->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info, "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (i_size_read(bdev->bd_inode) <
	    btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info, "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	device->writeable = 1;
	device->generation = 0;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	ASSERT(list_empty(&srcdev->resized_list));
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->dev_root = fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 1;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}

void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
					      struct btrfs_device *tgtdev)
{
	WARN_ON(fs_info->fs_devices->rw_devices == 0);
	tgtdev->io_width = fs_info->dev_root->sectorsize;
	tgtdev->io_align = fs_info->dev_root->sectorsize;
	tgtdev->sector_size = fs_info->dev_root->sectorsize;
	tgtdev->dev_root = fs_info->dev_root;
	tgtdev->in_fs_metadata = 1;
}

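/*
 * Write the in-memory state of @device back to its dev item in the chunk
 * tree.
 */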
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

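/*
 * Grow @device to @new_size, adjusting the superblock total and queueing
 * the device on the resized list; the new size must be larger than the
 * current one.
 */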
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	struct btrfs_fs_devices *fs_devices;
	u64 old_total;
	u64 diff;

	if (!device->writeable)
		return -EACCES;

	lock_chunks(device->dev_root);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = new_size - device->total_bytes;

	if (new_size <= device->total_bytes ||
	    device->is_tgtdev_for_dev_replace) {
		unlock_chunks(device->dev_root);
		return -EINVAL;
	}

	fs_devices = device->dev_root->fs_info->fs_devices;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->dev_root->fs_info);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_devices->resized_devices);
	unlock_chunks(device->dev_root);

	return btrfs_update_device(trans, device);
}

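/*
 * Delete the chunk item for @chunk_offset from the chunk tree.
 */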
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

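/*
 * Remove the chunk's copy from the sys_chunk_array embedded in the
 * superblock by shifting the following entries down over it.
 */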
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	unlock_chunks(root);
	return ret;

int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	int i, ret = 0;

	/* Just in case */
	root = root->fs_info->chunk_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em || em->start > chunk_offset ||
	    em->start + em->len < chunk_offset) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		if (em)
			free_extent_map(em);
		return -EINVAL;
	}
	map = em->map_lookup;
	lock_chunks(root->fs_info->chunk_root);
	check_system_chunk(trans, extent_root, map->type);
	unlock_chunks(root->fs_info->chunk_root);

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			lock_chunks(root);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			spin_lock(&root->fs_info->free_chunk_lock);
			root->fs_info->free_chunk_space += dev_extent_len;
			spin_unlock(&root->fs_info->free_chunk_lock);
			btrfs_clear_space_info_full(root->fs_info);
			unlock_chunks(root);
		}

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}

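/*
 * Relocate all extents of the chunk at @chunk_offset and then delete the
 * empty chunk. The caller must hold delete_unused_bgs_mutex, see the
 * comment inside.
 */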
static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
{
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	int ret;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(root);
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	btrfs_scrub_continue(root);
	if (ret)
		return ret;

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, root, chunk_offset);
	btrfs_end_transaction(trans, root);
	return ret;
}

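/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk, retrying
 * once for chunks that failed with ENOSPC.
 */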
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

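/*
 * Persist the balance control item in the tree root so that an
 * interrupted balance can be resumed after a remount.
 */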
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
					bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
					bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

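/*
 * Run a chunk through every configured balance filter. Returns 1 if the
 * chunk should be relocated, 0 if some filter excludes it.
 */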
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}
3402
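/*
 * The main balance loop: first make some room on all devices, then walk
 * the chunk tree twice - a counting pass that only fills in the stats and
 * a second pass that relocates every chunk that passes the filters.
 */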
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3403
{
3404
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3405 3406 3407
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
3408 3409 3410
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
3411
	u64 chunk_type;
3412
	struct btrfs_chunk *chunk;
3413 3414 3415
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
3416
	struct btrfs_trans_handle *trans;
3417 3418
	struct extent_buffer *leaf;
	int slot;
3419 3420
	int ret;
	int enospc_errors = 0;
3421
	bool counting = true;
3422
	/* The single value limit and min/max limits use the same bytes in the */
3423 3424 3425
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
3426 3427 3428
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
3429
	int chunk_reserved = 0;
3430
	u64 bytes_used = 0;
3431 3432

	/* step one make some room on all the devices */
3433
	devices = &fs_info->fs_devices->devices;
Q
Qinghuang Feng 已提交
3434
	list_for_each_entry(device, devices, dev_list) {
3435
		old_size = btrfs_device_get_total_bytes(device);
3436
		size_to_free = div_factor(old_size, 1);
3437
		size_to_free = min_t(u64, size_to_free, SZ_1M);
Y
Yan Zheng 已提交
3438
		if (!device->writeable ||
3439 3440
		    btrfs_device_get_total_bytes(device) -
		    btrfs_device_get_bytes_used(device) > size_to_free ||
3441
		    device->is_tgtdev_for_dev_replace)
3442 3443 3444
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
3445 3446
		if (ret == -ENOSPC)
			break;
3447 3448
		BUG_ON(ret);

3449
		trans = btrfs_start_transaction(dev_root, 0);
3450
		BUG_ON(IS_ERR(trans));
3451 3452 3453 3454 3455 3456 3457 3458 3459

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
3460 3461 3462 3463
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
3464 3465 3466 3467 3468 3469

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
3470
	if (!counting) {
3471 3472 3473 3474
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the
		 */
3475 3476 3477 3478
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
3479 3480 3481 3482
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

C
Chris Mason 已提交
3483
	while (1) {
3484
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3485
		    atomic_read(&fs_info->balance_cancel_req)) {
3486 3487 3488 3489
			ret = -ECANCELED;
			goto error;
		}

3490
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3491
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3492 3493
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3494
			goto error;
3495
		}
3496 3497 3498 3499 3500 3501

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
3502
			BUG(); /* FIXME break ? */
3503 3504 3505

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
3506
		if (ret) {
3507
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3508
			ret = 0;
3509
			break;
3510
		}
3511

3512 3513 3514
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3515

3516 3517
		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3518
			break;
3519
		}
3520

3521
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3522
		chunk_type = btrfs_chunk_type(leaf, chunk);
3523

3524 3525 3526 3527 3528 3529
		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

3530 3531
		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
3532

3533
		btrfs_release_path(path);
3534 3535
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3536
			goto loop;
3537
		}
3538

3539
		if (counting) {
3540
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3541 3542 3543
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3566 3567 3568
			goto loop;
		}

3569 3570 3571 3572 3573 3574 3575
		ASSERT(fs_info->data_sinfo);
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
		    !chunk_reserved && !bytes_used) {
3576 3577 3578 3579 3580 3581 3582 3583 3584
			trans = btrfs_start_transaction(chunk_root, 0);
			if (IS_ERR(trans)) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				ret = PTR_ERR(trans);
				goto error;
			}

			ret = btrfs_force_chunk_alloc(trans, chunk_root,
						      BTRFS_BLOCK_GROUP_DATA);
3585
			btrfs_end_transaction(trans, chunk_root);
3586 3587 3588 3589 3590 3591 3592
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			}
			chunk_reserved = 1;
		}

3593 3594
		ret = btrfs_relocate_chunk(chunk_root,
					   found_key.offset);
3595
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3596 3597
		if (ret && ret != -ENOSPC)
			goto error;
3598
		if (ret == -ENOSPC) {
3599
			enospc_errors++;
3600 3601 3602 3603 3604
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
3605
loop:
3606 3607
		if (found_key.offset == 0)
			break;
3608
		key.offset = found_key.offset - 1;
3609
	}
3610

3611 3612 3613 3614 3615
	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
3616 3617
error:
	btrfs_free_path(path);
3618
	if (enospc_errors) {
3619
		btrfs_info(fs_info, "%d enospc errors during balance",
3620 3621 3622 3623 3624
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

3625 3626 3627
	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	int ret;

	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);

	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
}

/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}

/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
				  "with mixed groups data and metadata balance options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		BUG_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
	if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if (validate_convert_profile(&bctl->data, allowed)) {
		btrfs_err(fs_info,
			  "unable to start balance with target data profile %llu",
			  bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		btrfs_err(fs_info,
			  "unable to start balance with target metadata profile %llu",
			  bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		btrfs_err(fs_info,
			  "unable to start balance with target system profile %llu",
			  bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	/* allow reducing meta or sys integrity only if force is set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed))) {
			if (bctl->flags & BTRFS_BALANCE_FORCE) {
				btrfs_info(fs_info, "force reducing metadata integrity");
			} else {
				btrfs_err(fs_info,
					  "balance will reduce metadata integrity, use force if you want this");
				ret = -EINVAL;
				goto out;
			}
		}
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
		btrfs_warn(fs_info,
	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
			bctl->meta.target, bctl->data.target);
	}

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures = min(
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
			btrfs_get_num_tolerated_disk_barrier_failures(
				bctl->sys.target));
	}

	ret = insert_balance_item(fs_info->tree_root, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		set_balance_control(bctl);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	atomic_inc(&fs_info->balance_running);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	atomic_dec(&fs_info->balance_running);

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures =
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	}

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, 0, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		__cancel_balance(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
	else {
		kfree(bctl);
		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
	}
	return ret;
}

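/*
 * Kthread entry point used to resume an interrupted balance after mount.
 * It takes the volume and balance mutexes itself and restarts the balance
 * described by fs_info->balance_ctl, if one was recovered from disk.
 */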
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	if (fs_info->balance_ctl) {
		btrfs_info(fs_info, "continuing balance");
		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);

	return ret;
}

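/*
 * Spawn the balance-resume kthread if a balance control was recovered
 * during mount.  With the skip_balance mount option the resume is only
 * skipped, not cancelled: the balance control stays in place.
 */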
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	spin_lock(&fs_info->balance_lock);
	if (!fs_info->balance_ctl) {
		spin_unlock(&fs_info->balance_lock);
		return 0;
	}
	spin_unlock(&fs_info->balance_lock);

	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
		btrfs_info(fs_info, "force skipping balance");
		return 0;
	}

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

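/*
 * Recover a balance that was interrupted by a crash or unmount: read the
 * balance item from the tree root and re-create the in-memory balance
 * control.  The balance itself is restarted later, via
 * btrfs_resume_balance_async().
 */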
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->fs_info = fs_info;
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	set_balance_control(bctl);

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

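/*
 * Pause a running balance.  Raising balance_pause_req makes
 * __btrfs_balance() bail out with -ECANCELED at the next chunk boundary;
 * we then wait until the balance kthread has stopped.  The balance item
 * stays on disk so the operation can be resumed later.
 */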
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (atomic_read(&fs_info->balance_running)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(atomic_read(&fs_info->balance_running));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

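/*
 * Cancel a paused or running balance.  If the balance is running we just
 * raise the cancel request and wait; btrfs_balance() deletes the balance
 * item on its way out.  For a paused balance we call __cancel_balance()
 * here, which needs the volume mutex.
 */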
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	if (fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (atomic_read(&fs_info->balance_running)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);
		mutex_lock(&fs_info->balance_mutex);
	} else {
		/* __cancel_balance needs volume_mutex */
		mutex_unlock(&fs_info->balance_mutex);
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl)
			__cancel_balance(fs_info);

		mutex_unlock(&fs_info->volume_mutex);
	}

	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

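/*
 * Scan all root items and add any missing subvolume UUID and received
 * UUID entries to the UUID tree.  Runs as the "btrfs-uuid" kthread and
 * releases uuid_tree_rescan_sem when done.
 */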
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_key max_key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	max_key.objectid = (u64)-1;
	max_key.type = BTRFS_ROOT_ITEM_KEY;
	max_key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, 0);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fs_info->uuid_root);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		fs_info->update_uuid_tree_gen = 1;
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}

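/*
 * Create the UUID tree on a filesystem that does not have one yet and
 * kick off the scan kthread to populate it from the existing subvolumes.
 */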
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, tree_root, ret);
		btrfs_end_transaction(trans, tree_root);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans, tree_root);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff = old_size - new_size;

	if (device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;

	lock_chunks(root);

	btrfs_device_set_total_bytes(device, new_size);
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_offset);
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not completed yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we cannot use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			unlock_chunks(root);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &root->fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	if (ret) {
		lock_chunks(root);
		btrfs_device_set_total_bytes(device, old_size);
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
	}
	return ret;
}

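/*
 * Append a copy of a system chunk item to the sys_chunk_array in the
 * superblock, so the chunk tree itself can be found at mount time.
 * Fails with -EFBIG when the fixed-size array is full.
 */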
static int btrfs_add_system_chunk(struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		unlock_chunks(root);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	unlock_chunks(root);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

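/*
 * Stripe length used for RAID5/6 chunks.  Currently hardcoded to 64K;
 * the arguments are kept around for a future preferred-stripe-size knob
 * (see the TODO below).
 */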
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO allow them to set a preferred stripe size */
	return SZ_64K;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

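/*
 * Allocate a new chunk of the given type at logical address 'start':
 * collect the per-device holes, sort the devices by available space,
 * compute the stripe layout for the profile and insert the resulting
 * extent map.  The chunk item itself is written later, by
 * btrfs_finish_chunk_alloc().
 */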
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, u64 start,
			       u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	index = __get_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = 10 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!device->in_fs_metadata ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = num_stripes / ncopies;

	if (type & BTRFS_BLOCK_GROUP_RAID5) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 1;
	}
	if (type & BTRFS_BLOCK_GROUP_RAID6) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 2;
	}

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		u64 mask = (1ULL << 24) - 1;

		stripe_size = div_u64(max_chunk_size, data_stripes);

		/* bump the answer up to a 16MB boundary */
		stripe_size = (stripe_size + mask) & ~mask;

		/* but don't go higher than the limits we found
		 * while searching for free extents
		 */
		if (stripe_size > devices_info[ndevs-1].max_avail)
			stripe_size = devices_info[ndevs-1].max_avail;
	}

	stripe_size = div_u64(stripe_size, dev_stripes);

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = div_u64(stripe_size, raid_stripe_len);
	stripe_size *= raid_stripe_len;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = raid_stripe_len;
	map->io_align = raid_stripe_len;
	map->io_width = raid_stripe_len;
	map->type = type;
	map->sub_stripes = sub_stripes;

	num_bytes = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (!ret) {
		list_add_tail(&em->list, &trans->transaction->pending_chunks);
		atomic_inc(&em->refs);
	}
	write_unlock(&em_tree->lock);
	if (ret) {
		free_extent_map(em);
		goto error;
	}

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	free_extent_map(em);
	check_raid56_incompat_flag(extent_root->fs_info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
	/* One for the pending_chunks list reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

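/*
 * Second phase of chunk allocation: look up the pending extent map for
 * the chunk, allocate the device extents and insert the chunk item into
 * the chunk tree (and into the superblock's sys_chunk_array for system
 * chunks).
 */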
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(extent_root->fs_info,
			   "unable to find logical %Lu len %Lu",
			   chunk_offset, chunk_size);
		return -EINVAL;
	}

	if (em->start != chunk_offset || em->len != chunk_size) {
		btrfs_crit(extent_root->fs_info,
			  "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
			  chunk_offset, chunk_size, em->start, em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device,
					     chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     chunk_offset, dev_offset,
					     stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;

	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
	chunk_offset = find_next_chunk(extent_root->fs_info);
	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
				  alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(root->fs_info);
	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
				  alloc_profile);
	return ret;
}

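/*
 * Maximum number of failed stripes a chunk of the given profile can
 * tolerate: one for the mirrored profiles, RAID5 and DUP, two for RAID6,
 * zero for everything else.
 */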
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
	}

	return max_errors;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->missing) {
			miss_ndevs++;
			continue;
		}

		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

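/*
 * Return how many copies of the block at 'logical' exist, based on the
 * chunk profile.  While a device replace is running one extra copy is
 * reported, since the replace target may hold an additional copy (see
 * the dev-replace handling in __btrfs_map_block()).
 */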
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);

	/*
	 * We could return errors for these cases, but that could get ugly and
	 * we'd probably do the same thing which is just not do anything else
	 * and exit, so return 1 so the callers don't try to use other copies.
	 */
	if (!em) {
		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
			    logical+len);
		return 1;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
			   logical, logical+len, em->start,
			   em->start + em->len);
		free_extent_map(em);
		return 1;
	}

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	free_extent_map(em);

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
		ret++;
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
				    struct btrfs_mapping_tree *map_tree,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	unsigned long len = root->sectorsize;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		len = map->stripe_len * nr_data_stripes(map);
	free_extent_map(em);
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
			   u64 logical, u64 len, int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		ret = 1;
	free_extent_map(em);
	return ret;
}

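/*
 * Pick a mirror to read from: prefer 'optimal' if its device is present,
 * otherwise fall back to any stripe in [first, first + num) with a live
 * bdev, avoiding the dev-replace source drive on the first pass.
 */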
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first, int num,
			    int optimal, int dev_replace_is_ongoing)
{
	int i;
	int tolerance;
	struct btrfs_device *srcdev;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[optimal].dev->bdev &&
		    (tolerance || map->stripes[optimal].dev != srcdev))
			return optimal;
		for (i = first; i < first + num; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}

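/*
 * Allocate a btrfs_bio with room for 'total_stripes' stripes, the
 * target-device index array and the raid map in a single allocation.
 * Uses __GFP_NOFAIL, so it cannot return NULL.
 */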
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	atomic_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!atomic_read(&bbio->refs));
	atomic_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (atomic_dec_and_test(&bbio->refs))
		kfree(bbio);
}

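/*
 * Map a logical [logical, logical + *length) range to the physical
 * stripes it lives on, honouring the RAID profile, the requested mirror
 * and any device replace operation in progress.  *length is trimmed so
 * the resulting bio never straddles a stripe boundary.
 */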
5252
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5253
			     u64 logical, u64 *length,
5254
			     struct btrfs_bio **bbio_ret,
5255
			     int mirror_num, int need_raid_map)
5256 5257 5258
{
	struct extent_map *em;
	struct map_lookup *map;
5259
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5260 5261
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
5262
	u64 stripe_offset;
5263
	u64 stripe_end_offset;
5264
	u64 stripe_nr;
5265 5266
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
D
David Woodhouse 已提交
5267
	u64 stripe_len;
5268
	u32 stripe_index;
5269
	int i;
L
Li Zefan 已提交
5270
	int ret = 0;
5271
	int num_stripes;
5272
	int max_errors = 0;
5273
	int tgtdev_indexes = 0;
5274
	struct btrfs_bio *bbio = NULL;
5275 5276 5277
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
5278 5279
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
D
David Woodhouse 已提交
5280
	u64 raid56_full_stripe_start = (u64)-1;
5281

5282
	read_lock(&em_tree->lock);
5283
	em = lookup_extent_mapping(em_tree, logical, *length);
5284
	read_unlock(&em_tree->lock);
5285

5286
	if (!em) {
5287
		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5288
			logical, *length);
5289 5290 5291 5292 5293
		return -EINVAL;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
5294
			   "found %Lu-%Lu", logical, em->start,
5295
			   em->start + em->len);
5296
		free_extent_map(em);
5297
		return -EINVAL;
5298
	}
5299

5300
	map = em->map_lookup;
5301
	offset = logical - em->start;
5302

D
David Woodhouse 已提交
5303
	stripe_len = map->stripe_len;
5304 5305 5306 5307 5308
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
5309
	stripe_nr = div64_u64(stripe_nr, stripe_len);
5310

D
David Woodhouse 已提交
5311
	stripe_offset = stripe_nr * stripe_len;
5312 5313 5314 5315 5316 5317 5318 5319 5320
	if (offset < stripe_offset) {
		btrfs_crit(fs_info, "stripe math has gone wrong, "
			   "stripe_offset=%llu, offset=%llu, start=%llu, "
			   "logical=%llu, stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		free_extent_map(em);
		return -EINVAL;
	}
5321 5322 5323 5324

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	/* if we're here for raid56, we need to know the stripe aligned start */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;

		/* allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
				full_stripe_len);
		raid56_full_stripe_start *= full_stripe_len;
	}

	if (rw & REQ_DISCARD) {
		/* we don't discard raid56 yet */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		*length = min_t(u64, em->len - offset, *length);
	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/* For writes to RAID[56], allow a full stripeset across all disks.
		   For other RAID types and for RAID[56] reads, just allow a single
		   stripe (on a single disk). */
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
		    (rw & REQ_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
	} else {
		*length = em->len - offset;
	}

	/* This is for when we're called from btrfs_merge_bio_hook() and all
	   it cares about is the length */
	if (!bbio_ret)
		goto out;

	btrfs_dev_replace_lock(dev_replace, 0);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (!dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace, 0);
	else
		btrfs_dev_replace_set_lock_blocking(dev_replace);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
	    dev_replace->tgtdev != NULL) {
		/*
		 * in dev-replace case, for repair case (that's the only
		 * case where the mirror is selected explicitly when
		 * calling btrfs_map_block), blocks left of the left cursor
		 * can also be read from the target drive.
		 * For REQ_GET_READ_MIRRORS, the target drive is added as
		 * the last one to the array of stripes. For READ, it also
		 * needs to be supported using the same mirror number.
		 * If the requested block is not left of the left cursor,
		 * EIO is returned. This can happen because btrfs_num_copies()
		 * returns one more in the dev-replace case.
		 */
		u64 tmp_length = *length;
		struct btrfs_bio *tmp_bbio = NULL;
		int tmp_num_stripes;
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
			     logical, &tmp_length, &tmp_bbio, 0, 0);
		if (ret) {
			WARN_ON(tmp_bbio != NULL);
			goto out;
		}

		tmp_num_stripes = tmp_bbio->num_stripes;
		if (mirror_num > tmp_num_stripes) {
			/*
			 * REQ_GET_READ_MIRRORS does not contain this
			 * mirror, that means that the requested area
			 * is not left of the left cursor
			 */
			ret = -EIO;
			btrfs_put_bbio(tmp_bbio);
			goto out;
		}

		/*
		 * process the rest of the function using the mirror_num
		 * of the source drive. Therefore look it up first.
		 * At the end, patch the device pointer to the one of the
		 * target drive.
		 */
		for (i = 0; i < tmp_num_stripes; i++) {
			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
				continue;

			/*
			 * In case of DUP, in order to keep it simple, only add
			 * the mirror with the lowest physical address
			 */
			if (found &&
			    physical_of_found <= tmp_bbio->stripes[i].physical)
				continue;

			index_srcdev = i;
			found = 1;
			physical_of_found = tmp_bbio->stripes[i].physical;
		}

		btrfs_put_bbio(tmp_bbio);

		if (!found) {
			WARN_ON(1);
			ret = -EIO;
			goto out;
		}

		mirror_num = index_srcdev + 1;
		patch_the_first_stripe_for_dev_replace = 1;
		physical_to_patch_in_first_stripe = physical_of_found;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);

	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map &&
		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
		     mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!(rw & (REQ_WRITE | REQ_DISCARD |
				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info, "stripe index math went horribly wrong, "
			   "got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_alloc_stripes <<= 1;
		if (rw & REQ_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
	    mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}
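
	/*
	 * To illustrate the rotation computed above: on a 3-device RAID5
	 * chunk (2 data + 1 parity), full stripe 0 has rot = 0 and lays
	 * out [D0 D1 P] across the stripe array, while full stripe 1 has
	 * rot = 1 and lays out [P D0 D1], so parity steps across the
	 * devices from one full stripe to the next.
	 */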

	if (rw & REQ_DISCARD) {
		u32 factor = 0;
		u32 sub_stripes = 0;
		u64 stripes_per_dev = 0;
		u32 remaining_stripes = 0;
		u32 last_stripe = 0;

		if (map->type &
		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
				sub_stripes = 1;
			else
				sub_stripes = map->sub_stripes;

			factor = map->num_stripes / sub_stripes;
			stripes_per_dev = div_u64_rem(stripe_nr_end -
						      stripe_nr_orig,
						      factor,
						      &remaining_stripes);
			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
			last_stripe *= sub_stripes;
		}

		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;
				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;
				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
					bbio->stripes[i].length -=
						stripe_end_offset;
				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	tgtdev_indexes = 0;
	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			if (physical_of_found + map->stripe_len <=
			    dev_replace->cursor_left) {
				struct btrfs_bio_stripe *tgtdev_stripe =
					bbio->stripes + num_stripes;

				tgtdev_stripe->physical = physical_of_found;
				tgtdev_stripe->length =
					bbio->stripes[index_srcdev].length;
				tgtdev_stripe->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[index_srcdev] = num_stripes;

				tgtdev_indexes++;
				num_stripes++;
			}
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;
	bbio->num_tgtdevs = tgtdev_indexes;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		btrfs_dev_replace_clear_lock_blocking(dev_replace);
		btrfs_dev_replace_unlock(dev_replace, 0);
	}
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret, int mirror_num,
		     int need_raid_map)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, need_raid_map);
}
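
/*
 * Reverse mapping: given a physical byte offset on some device inside a
 * chunk, collect every logical address that maps to it.  Mirrored
 * stripes (e.g. the sub-stripe copies in RAID10) can resolve to the
 * same logical address, hence the dedup against buf[] below.
 */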

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
		       chunk_start);
		return -EIO;
	}

	if (em->start != chunk_start) {
		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
	map = em->map_lookup;

	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}
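
/*
 * Per-stripe bio completion.  Every failed clone bumps bbio->error; the
 * original bio is completed only once the last in-flight stripe bio
 * finishes (stripes_pending reaches zero), and it reports -EIO only if
 * more stripes failed than bbio->max_errors tolerates.
 */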

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_error) {
		atomic_inc(&bbio->error);
		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_error = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_error = 0;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (device->missing || !device->bdev) {
		bio_io_error(bio);
		return;
	}

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(root->fs_info->submit_workers,
				 &device->work);
}

static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_iter.bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;

	btrfs_bio_counter_inc_noblocked(root->fs_info);

	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		bio->bi_error = -EIO;
		btrfs_end_bbio(bbio, bio);
	}
}
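
/*
 * Top-level submission path: look up the stripes for this bio with
 * __btrfs_map_block(), divert RAID5/6 writes and repair reads to the
 * raid56 layer, and otherwise clone the bio once per stripe, the last
 * stripe reusing the original bio.
 */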

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(root->fs_info);
	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
			      mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = root->fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((rw & WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (rw & WRITE) {
			ret = raid56_parity_write(root, bio, bbio, map_length);
		} else {
			ret = raid56_parity_recover(root, bio, bbio, map_length,
						    mirror_num, 1);
		}

		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	if (map_length < length) {
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
			logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else
			bio = first_bio;

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
	}
	btrfs_bio_counter_dec(root->fs_info);
	return 0;
}
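
/*
 * Look up a device by devid (and optionally uuid/fsid), walking the
 * main fs_devices list first and then any seed filesystems chained
 * behind it.
 */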

struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}
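
/*
 * Turn an on-disk chunk item into an in-memory map_lookup and insert it
 * into the mapping tree as an extent_map keyed by the chunk's logical
 * start, after sanity-checking the stripe count, alignment and type
 * flags read from disk.
 */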

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 stripe_len;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	if (!num_stripes) {
		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, root->sectorsize)) {
		btrfs_err(root->fs_info,
			  "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
		btrfs_err(root->fs_info,
			"invalid chunk length %llu", length);
		return -EIO;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    btrfs_chunk_type(leaf, chunk)) {
		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, root->fs_info->fs_devices,
						devid, uuid);
			if (!map->stripes[i].dev) {
				free_extent_map(em);
				return -EIO;
			}
			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
						devid, uuid);
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		if (!btrfs_test_opt(root, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}
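
/*
 * Read one dev item.  A foreign fsid means the device belongs to a seed
 * filesystem, so the matching seed fs_devices is opened (or allocated
 * empty when mounted -o degraded) first; devices that cannot be found
 * are stubbed in with add_missing_dev() under the same mount option.
 */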

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		fs_devices = open_seed_devices(root, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
				devid, dev_uuid);
	} else {
		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device->bdev && !device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			device->missing = 1;
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(device->missing);

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}
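
/*
 * Parse the superblock's sys_chunk_array: a packed sequence of
 * (btrfs_disk_key, btrfs_chunk) pairs describing the system chunks
 * needed to bootstrap the chunk tree, with every read length checked
 * against the declared array size.
 */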

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * set_extent_buffer_uptodate() call does not properly mark all its
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(KERN_ERR
	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
		} else {
			printk(KERN_ERR
		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->dev_root = fs_info->dev_root;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}
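
/*
 * Device statistics are persisted in the device tree, one item per
 * device keyed (BTRFS_DEV_STATS_OBJECTID, BTRFS_PERSISTENT_ITEM_KEY,
 * devid); a device with no such item simply starts from zeroed stats.
 */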

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(dev_root->fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(dev_root->fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(dev_root->fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
			continue;

		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
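
/*
 * Note on the dirty accounting above: dev_stats_ccnt is sampled before
 * the item is written and only that sampled delta is subtracted
 * afterwards, so increments racing with the write keep the device
 * dirty for the next commit.
 */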

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
6990
}
6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	lock_chunks(fs_info->dev_root);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	unlock_chunks(fs_info->dev_root);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
					struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	lock_chunks(root);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	unlock_chunks(root);
}

void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}
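
/*
 * Swap a closing device out of the RCU-visible dev_list: a fresh
 * btrfs_device carrying only the identity (devid, uuid, name) replaces
 * the live one via list_replace_rcu(), and the old structure is freed
 * after a grace period so lockless readers never see it half torn down.
 */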

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device);
}