/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
	},
};

const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static void btrfs_close_one_device(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

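/*
 * Find a device on @head matching @devid and, when @uuid is non-NULL,
 * the device uuid as well; returns NULL if there is no match.
 */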
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

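/* Return the registered btrfs_fs_devices whose fsid matches @fsid, or NULL. */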
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

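/*
 * Open the block device at @device_path, optionally flushing dirty pages
 * first, set a 4K block size and read the btrfs super block into *bh.
 * On any failure both *bdev and *bh are cleared and an error is returned.
 */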
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

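/*
 * Put a chain of bios (from @head to @tail) back at the front of the
 * pending list, preserving their original order.
 */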
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the schedulers ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
510 511
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
512 513
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

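/*
 * When a device is registered under a new filesystem, look for an older,
 * unmounted and non-seeding filesystem that still claims the same path and
 * drop that stale device entry (freeing the whole fs_devices if it was the
 * last member).
 */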
void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * Todo: This won't be enough. What if the same device
			 * comes back (with new uuid and) with its mapper path?
			 * But for now, this does help as mostly an admin will
			 * either use mapper or non mapper path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
						rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the
			 * one with the larger generation number or the
			 * last-in if the generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}

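/*
 * Allocate a new btrfs_fs_devices with the same fsid as @orig and populate
 * it with copies of @orig's devices; names are duplicated and no block
 * devices are opened.
 */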
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

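/*
 * Final device teardown; run from a workqueue (scheduled by the RCU
 * callback free_device() below) so that blkdev_put() is not called from
 * RCU context.
 */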
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

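/*
 * Open all member devices that can be opened, skipping failures, and track
 * the device with the highest generation for latest_bdev. Returns -EINVAL
 * only if no device could be opened at all.
 */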
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

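/*
 * Helpers for reading a super block through the page cache during scan;
 * btrfs_release_disk_super() undoes the kmap and page reference taken by
 * btrfs_read_disk_super().
 */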
void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

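/*
 * Check whether [*start, *start + len) on @device overlaps a stripe of any
 * pending or pinned chunk; if so, advance *start past the conflict and
 * return 1 so the free-extent search can skip the space.
 */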
static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
1216 1217
again:
	list_for_each_entry(em, search_list, list) {
1218 1219 1220
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}


/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space. that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
1292 1293
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
1447
					  num_bytes, 0, start, len);
1448 1449
}

1450
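/*
 * Find the dev extent item covering @start on @device, delete it and
 * report its length through @dev_extent_len.
 */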
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1451
			  struct btrfs_device *device,
M
Miao Xie 已提交
1452
			  u64 start, u64 *dev_extent_len)
1453 1454 1455 1456 1457
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
1458 1459 1460
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;
1461 1462 1463 1464 1465 1466 1467 1468

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
M
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
1474 1475
		if (ret)
			goto out;
1476 1477 1478 1479 1480 1481
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
M
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
1489
	} else {
1490
		btrfs_std_error(root->fs_info, ret, "Slot search failed");
1491
		goto out;
1492
	}
1493

M

1496
	ret = btrfs_del_item(trans, root, path);
1497
	if (ret) {
1498
		btrfs_std_error(root->fs_info, ret,
1499
			    "Failed to remove dev extent item");
Z
Zhao Lei 已提交
1500
	} else {
1501
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1502
	}
1503
out:
1504 1505 1506 1507
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

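/*
 * The logical start for a new chunk: the end of the last extent map in the
 * chunk mapping tree, or 0 if the tree is empty.
 */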
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
1594 1595
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

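/*
 * Delete the DEV_ITEM of @device from the chunk tree, in its own short
 * transaction.
 */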
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;
	char *dev_name = NULL;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_user_input(root, devid, device_path,
				&device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		dev_name = kstrdup(device->name->str, GFP_KERNEL);
		if (!dev_name) {
			ret = -ENOMEM;
			goto error_undo;
		}
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
1832 1833 1834 1835 1836
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
1837
	 */
1838 1839

	cur_devices = device->fs_devices;
1840
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1841
	list_del_rcu(&device->dev_list);
1842

Y
Yan Zheng 已提交
1843
	device->fs_devices->num_devices--;
J
Y
1846
	if (device->missing)
1847
		device->fs_devices->missing_devices--;
1848

Y
Yan Zheng 已提交
1849 1850 1851 1852 1853 1854 1855
	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

1856
	if (device->bdev) {
Y
Yan Zheng 已提交
1857
		device->fs_devices->open_devices--;
1858
		/* remove sysfs entry */
1859
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1860
	}
1861

1862
	call_rcu(&device->rcu, free_device);
Y
Yan Zheng 已提交
1863

1864 1865
	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1866
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
Y
Yan Zheng 已提交
1867

1868
	if (cur_devices->open_devices == 0) {
Y
Yan Zheng 已提交
1869 1870 1871
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
1872 1873
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
Y
Yan Zheng 已提交
1874
				break;
1875
			}
Y
Yan Zheng 已提交
1876
			fs_devices = fs_devices->seed;
Y
Yan Zheng 已提交
1877
		}
1878 1879 1880
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
Y
Yan Zheng 已提交
1881 1882
	}

1883 1884 1885
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

Y
Yan Zheng 已提交
1886 1887 1888 1889
	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
1890
	if (clear_super) {
1891
		struct block_device *bdev;
1892

1893 1894 1895 1896
		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
						root->fs_info->bdev_holder);
		if (!IS_ERR(bdev)) {
			btrfs_scratch_superblocks(bdev, dev_name);
1897 1898
			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
		}
1899 1900
	}

1901
out:
1902 1903
	kfree(dev_name);

1904 1905
	mutex_unlock(&uuid_mutex);
	return ret;
1906

1907 1908
error_undo:
	if (device->writeable) {
1909
		lock_chunks(root);
1910 1911
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
1912
		device->fs_devices->rw_devices++;
1913
		unlock_chunks(root);
1914
	}
1915
	goto out;
1916 1917
}
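
/*
 * A minimal sketch of the device_list_mutex rule described in the comment
 * inside btrfs_rm_device() above: whoever reads or updates num_devices in
 * the super block (super_copy) must hold the device list mutex. The helper
 * below is hypothetical and only illustrates the locking rule.
 */
static void __maybe_unused example_super_num_devices_update(
				struct btrfs_fs_info *fs_info, int delta)
{
	u64 num_devices;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	num_devices = btrfs_super_num_devices(fs_info->super_copy) + delta;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}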

void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * For a filesystem with no seed device, srcdev->fs_devices points to
	 * the fs_devices of fs_info. When the device being replaced is a
	 * seed device, it points to the seed's own fs_devices. Either way,
	 * srcdev->fs_devices is the correct one to update here.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable) {
		fs_devices->rw_devices--;
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	call_rcu(&srcdev->rcu, free_device);

	/*
	 * Unless fs_devices belongs to a seed filesystem, num_devices
	 * shouldn't drop to zero.
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devices left, delete the fs_devices entirely */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	mutex_lock(&uuid_mutex);
	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);

	if (tgtdev->bdev) {
		btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);
}

static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device)
			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

int btrfs_find_device_by_user_input(struct btrfs_root *root, u64 srcdevid,
					 char *srcdev_name,
					 struct btrfs_device **device)
{
	int ret;

	if (srcdevid) {
		ret = 0;
		*device = btrfs_find_device(root->fs_info, srcdevid, NULL,
					    NULL);
		if (!*device)
			ret = -ENOENT;
	} else {
		if (!srcdev_name || !srcdev_name[0])
			return -EINVAL;

		ret = btrfs_find_device_missing_or_by_path(root, srcdev_name,
							   device);
	}
	return ret;
}
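
/*
 * Usage note for btrfs_find_device_by_user_input(): a non-zero devid takes
 * precedence over the name; the string "missing" selects the first device
 * that is present in the metadata but has no backing bdev; any other
 * non-empty string is resolved by reading the superblock at that path.
 */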

/*
 * Does all the dirty work required for changing the filesystem's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = __alloc_fs_devices();
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	lock_chunks(root);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	unlock_chunks(root);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
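
/*
 * Summary of the sprout above: the devices of the seed filesystem are moved
 * to a freshly allocated fs_devices (seed_devices), the mounted fs_devices
 * is emptied and given a newly generated fsid, and the SEEDING flag is
 * cleared from the superblock so the sprout becomes writable.
 */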

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 tmp;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 0;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	lock_chunks(root);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    tmp + device->total_bytes);

	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    tmp + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		lock_chunks(root);
		ret = init_first_rw_device(trans, root, device);
		unlock_chunks(root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	ret = btrfs_add_device(trans, root, device);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto error_trans;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}

		/* Sprouting changes the fsid of the mounted filesystem,
		 * so rename the fsid on sysfs as well
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
						root->fs_info->fsid);
		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
								fsid_buf))
			btrfs_warn(root->fs_info,
				"sysfs: failed to create fsid for sprout");
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_std_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_trans:
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
				  struct btrfs_device *srcdev,
				  struct btrfs_device **device_out)
{
	struct request_queue *q;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *devices;
	struct rcu_string *name;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (fs_info->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info, "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (i_size_read(bdev->bd_inode) <
	    btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info, "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	device->writeable = 1;
	device->generation = 0;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	ASSERT(list_empty(&srcdev->resized_list));
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->dev_root = fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 1;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}

void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
					      struct btrfs_device *tgtdev)
{
	WARN_ON(fs_info->fs_devices->rw_devices == 0);
	tgtdev->io_width = fs_info->dev_root->sectorsize;
	tgtdev->io_align = fs_info->dev_root->sectorsize;
	tgtdev->sector_size = fs_info->dev_root->sectorsize;
	tgtdev->dev_root = fs_info->dev_root;
	tgtdev->in_fs_metadata = 1;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	struct btrfs_fs_devices *fs_devices;
	u64 old_total;
	u64 diff;

	if (!device->writeable)
		return -EACCES;

	lock_chunks(device->dev_root);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = new_size - device->total_bytes;

	if (new_size <= device->total_bytes ||
	    device->is_tgtdev_for_dev_replace) {
		unlock_chunks(device->dev_root);
		return -EINVAL;
	}

	fs_devices = device->dev_root->fs_info->fs_devices;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->dev_root->fs_info);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_devices->resized_devices);
	unlock_chunks(device->dev_root);

	return btrfs_update_device(trans, device);
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_std_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_std_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	unlock_chunks(root);
	return ret;
}
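
/*
 * Note on the deletion above: sys_chunk_array is a packed sequence of
 * (disk key, chunk item) pairs, so removing an entry is a memmove() of the
 * tail over the deleted bytes followed by shrinking sys_array_size.
 */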

int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	int i, ret = 0;

	/* Just in case */
	root = root->fs_info->chunk_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em || em->start > chunk_offset ||
	    em->start + em->len < chunk_offset) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		if (em)
			free_extent_map(em);
		return -EINVAL;
	}
	map = em->map_lookup;
	lock_chunks(root->fs_info->chunk_root);
	check_system_chunk(trans, extent_root, map->type);
	unlock_chunks(root->fs_info->chunk_root);

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			lock_chunks(root);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			spin_lock(&root->fs_info->free_chunk_lock);
			root->fs_info->free_chunk_space += dev_extent_len;
			spin_unlock(&root->fs_info->free_chunk_lock);
			btrfs_clear_space_info_full(root->fs_info);
			unlock_chunks(root);
		}

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
{
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	int ret;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(root);
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	btrfs_scrub_continue(root);
	if (ret)
		return ret;

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_std_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, root, chunk_offset);
	btrfs_end_transaction(trans, root);
	return ret;
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on the usage filter if it is not already in use.  The idea
	 * is that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
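
/*
 * Worked example of the heuristic above: resuming a balance that was
 * converting data chunks sets BTRFS_BALANCE_ARGS_SOFT on the data args (so
 * already-converted chunks are skipped), while a metadata args struct that
 * had no usage filter gets BTRFS_BALANCE_ARGS_USAGE with usage = 90.
 */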

/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
					bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
					bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
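
/*
 * Worked example for the range filter above, assuming div_factor_fine()
 * computes num * factor / 100 as defined in this tree's math.h: for a 1GiB
 * chunk (cache->key.offset == 1073741824) with usage_min = 10 and
 * usage_max = 50, the chunk is kept for balancing only while its used
 * bytes fall in [107374182, 536870912).
 */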

static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}
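
/*
 * The vrange test above is the standard overlap check for half-open
 * intervals. A standalone sketch of the same predicate (illustrative, not
 * an existing kernel helper):
 */
static inline int __maybe_unused example_ranges_overlap(u64 start1, u64 len1,
							u64 start2, u64 len2)
{
	/* [start1, start1 + len1) intersects [start2, start2 + len2) */
	return start1 < start2 + len2 && start2 < start1 + len1;
}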

static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global
		 * information about the count of all chunks that satisfy
		 * the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}
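
/*
 * Note on the filter pipeline above: the limit/limit_range filters must run
 * last because they mutate bargs (consuming the remaining count) and should
 * only do so for chunks that passed every other filter.
 */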

static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * The single value limit and min/max limits use the same bytes in
	 * the balance args (a union in struct btrfs_balance_args), so the
	 * u64 copies below preserve both forms across the counting pass.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	/* step one make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = btrfs_device_get_total_bytes(device);
		size_to_free = div_factor(old_size, 1);
		size_to_free = min_t(u64, size_to_free, SZ_1M);
		if (!device->writeable ||
		    btrfs_device_get_total_bytes(device) -
		    btrfs_device_get_bytes_used(device) > size_to_free ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same
		 * bytes in the balance args; restoring the saved u64 values
		 * resets whichever form was decremented during the counting
		 * pass.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) {
			trans = btrfs_start_transaction(chunk_root, 0);
			if (IS_ERR(trans)) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				ret = PTR_ERR(trans);
				goto error;
			}

			ret = btrfs_force_chunk_alloc(trans, chunk_root,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans, chunk_root);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			}
			chunk_reserved = 1;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	int ret;

	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	if (ret)
		btrfs_std_error(fs_info, ret, NULL);

	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
}

/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}

/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info, "with mixed groups data and "
				   "metadata balance options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		BUG_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	if (num_devices == 1)
		allowed |= BTRFS_BLOCK_GROUP_DUP;
	else if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if (validate_convert_profile(&bctl->data, allowed)) {
		btrfs_err(fs_info, "unable to start balance with target "
			   "data profile %llu",
		       bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		btrfs_err(fs_info,
			   "unable to start balance with target metadata profile %llu",
		       bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		btrfs_err(fs_info,
			   "unable to start balance with target system profile %llu",
		       bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed))) {
			if (bctl->flags & BTRFS_BALANCE_FORCE) {
				btrfs_info(fs_info, "force reducing metadata integrity");
			} else {
				btrfs_err(fs_info, "balance will reduce metadata "
					   "integrity, use force if you want this");
				ret = -EINVAL;
				goto out;
			}
		}
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
		btrfs_warn(fs_info,
	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
			bctl->meta.target, bctl->data.target);
	}

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures = min(
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
			btrfs_get_num_tolerated_disk_barrier_failures(
				bctl->sys.target));
	}

	ret = insert_balance_item(fs_info->tree_root, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		set_balance_control(bctl);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	atomic_inc(&fs_info->balance_running);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	atomic_dec(&fs_info->balance_running);

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures =
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	}

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, 0, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		__cancel_balance(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
	else {
		kfree(bctl);
		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
	}
	return ret;
}

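/*
 * Kthread body for resuming a previously interrupted balance: if a
 * balance control is present (recovered by btrfs_recover_balance()),
 * re-run btrfs_balance() with it under the usual mutexes.
 */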
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	if (fs_info->balance_ctl) {
		btrfs_info(fs_info, "continuing balance");
		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);

	return ret;
}

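/*
 * Asynchronously resume a recovered balance by spawning balance_kthread.
 * With the skip_balance mount option set, the resume is skipped and the
 * recovered balance state is left in place.
 */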
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	spin_lock(&fs_info->balance_lock);
	if (!fs_info->balance_ctl) {
		spin_unlock(&fs_info->balance_lock);
		return 0;
	}
	spin_unlock(&fs_info->balance_lock);

	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
		btrfs_info(fs_info, "force skipping balance");
		return 0;
	}

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

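/*
 * Read the balance item from the tree root (if any) and re-create the
 * in-memory balance control with BTRFS_BALANCE_RESUME set.  The balance
 * itself is restarted later, see btrfs_resume_balance_async().
 */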
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->fs_info = fs_info;
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	set_balance_control(bctl);

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

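/*
 * Pause a running balance: raise balance_pause_req and wait until
 * balance_running drops to zero.  Returns -ENOTCONN if no balance is
 * set up or actually running.
 */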
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (atomic_read(&fs_info->balance_running)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(atomic_read(&fs_info->balance_running));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	if (fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (atomic_read(&fs_info->balance_running)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);
		mutex_lock(&fs_info->balance_mutex);
	} else {
		/* __cancel_balance needs volume_mutex */
		mutex_unlock(&fs_info->balance_mutex);
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl)
			__cancel_balance(fs_info);

		mutex_unlock(&fs_info->volume_mutex);
	}

	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

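/*
 * Walk all root items and make sure every subvolume uuid (and received
 * uuid, where set) has a matching entry in the uuid tree, adding the
 * missing ones.  Runs as a kthread, see btrfs_create_uuid_tree() and
 * btrfs_uuid_rescan_kthread() below.
 */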
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_key max_key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	max_key.objectid = (u64)-1;
	max_key.type = BTRFS_ROOT_ITEM_KEY;
	max_key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, 0);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fs_info->uuid_root);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		fs_info->update_uuid_tree_gen = 1;
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}

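/*
 * Create an empty uuid tree and start btrfs_uuid_scan_kthread() to
 * populate it with entries for all existing subvolumes.
 */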
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, tree_root, ret);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans, tree_root);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff = old_size - new_size;

	if (device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;

	lock_chunks(root);

	btrfs_device_set_total_bytes(device, new_size);
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_offset);
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not complete yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we can not use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			unlock_chunks(root);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &root->fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	if (ret) {
		lock_chunks(root);
		btrfs_device_set_total_bytes(device, old_size);
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
	}
	return ret;
}

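/*
 * Append a (disk key, chunk item) pair to the superblock's
 * sys_chunk_array, which mirrors system chunks so the chunk tree itself
 * can be read at mount time.  Returns -EFBIG when the array is full.
 */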
static int btrfs_add_system_chunk(struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		unlock_chunks(root);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	unlock_chunks(root);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO allow them to set a preferred stripe size */
	return SZ_64K;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

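/*
 * Upper bounds on the number of stripes (one per device) a single chunk
 * item may reference: the chunk item plus one btrfs_stripe per device
 * has to fit into a leaf, or, for system chunks, into the superblock's
 * sys_chunk_array (where room for two keys and two chunk headers is
 * reserved).
 */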
#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

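/*
 * First phase of chunk allocation: choose the devices with the largest
 * free extents, honouring the per-profile limits from btrfs_raid_array,
 * and insert the new mapping into the extent map tree.  The matching
 * chunk and device extent items are written later by
 * btrfs_finish_chunk_alloc().
 */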
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, u64 start,
			       u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies the data has */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	index = __get_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = 10 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
		       type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!device->in_fs_metadata ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = num_stripes / ncopies;

	if (type & BTRFS_BLOCK_GROUP_RAID5) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 1;
	}
	if (type & BTRFS_BLOCK_GROUP_RAID6) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 2;
	}

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		u64 mask = (1ULL << 24) - 1;

		stripe_size = div_u64(max_chunk_size, data_stripes);

		/* bump the answer up to a 16MB boundary */
		stripe_size = (stripe_size + mask) & ~mask;

		/* but don't go higher than the limits we found
		 * while searching for free extents
		 */
		if (stripe_size > devices_info[ndevs-1].max_avail)
			stripe_size = devices_info[ndevs-1].max_avail;
	}

	stripe_size = div_u64(stripe_size, dev_stripes);

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = div_u64(stripe_size, raid_stripe_len);
	stripe_size *= raid_stripe_len;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = raid_stripe_len;
	map->io_align = raid_stripe_len;
	map->io_width = raid_stripe_len;
	map->type = type;
	map->sub_stripes = sub_stripes;

	num_bytes = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (!ret) {
		list_add_tail(&em->list, &trans->transaction->pending_chunks);
		atomic_inc(&em->refs);
	}
	write_unlock(&em_tree->lock);
	if (ret) {
		free_extent_map(em);
		goto error;
	}

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	free_extent_map(em);
	check_raid56_incompat_flag(extent_root->fs_info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
	/* One for the pending_chunks list reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

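/*
 * Second phase of chunk allocation: write the device extents and the
 * chunk item for a mapping created by __btrfs_alloc_chunk(), and mirror
 * system chunks into the superblock's sys_chunk_array.
 */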
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(extent_root->fs_info, "unable to find logical "
			   "%Lu len %Lu", chunk_offset, chunk_size);
		return -EINVAL;
	}

	if (em->start != chunk_offset || em->len != chunk_size) {
		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
			  chunk_size, em->start, em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device,
					     chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     chunk_offset, dev_offset,
					     stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does works
 * that make the new allocated chunk useable, but not do any operation
 * that modifies the chunk tree. The second part does the works that
 * require modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;

	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
	chunk_offset = find_next_chunk(extent_root->fs_info);
	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}

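/*
 * Allocate the initial metadata and system chunks on the first
 * writeable device, part of the bootstrap described above for seed
 * filesystems.
 */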
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
				  alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(root->fs_info);
	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
				  alloc_profile);
	return ret;
}

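/*
 * Maximum number of stripe errors that can be tolerated for a chunk of
 * the given raid profile: 1 for RAID1/RAID10/RAID5/DUP, 2 for RAID6,
 * 0 for everything else.
 */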
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
	}

	return max_errors;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->missing) {
			miss_ndevs++;
			continue;
		}

		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

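/*
 * Return the number of copies that can serve reads of the given logical
 * range, based on the raid profile of the containing chunk.  One extra
 * copy is reported while a device replace is running, since suitable
 * blocks may also be read from the replace target.
 */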
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);

	/*
	 * We could return errors for these cases, but that could get ugly and
	 * we'd probably do the same thing which is just not do anything else
	 * and exit, so return 1 so the callers don't try to use other copies.
	 */
	if (!em) {
		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
			    logical+len);
		return 1;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
			    "%Lu-%Lu", logical, logical+len, em->start,
			    em->start + em->len);
		free_extent_map(em);
		return 1;
	}

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	free_extent_map(em);

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
		ret++;
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
				    struct btrfs_mapping_tree *map_tree,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	unsigned long len = root->sectorsize;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		len = map->stripe_len * nr_data_stripes(map);
	free_extent_map(em);
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
			   u64 logical, u64 len, int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		ret = 1;
	free_extent_map(em);
	return ret;
}

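/*
 * Pick a mirror to read from: prefer the optimal stripe, but skip
 * missing devices and, while a device replace runs in "avoid" mode, the
 * source drive, unless no other mirror is available.
 */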
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first, int num,
			    int optimal, int dev_replace_is_ongoing)
{
	int i;
	int tolerance;
	struct btrfs_device *srcdev;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[optimal].dev->bdev &&
		    (tolerance || map->stripes[optimal].dev != srcdev))
			return optimal;
		for (i = first; i < first + num; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}

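/*
 * Allocate a btrfs_bio.  The variable-length tail holds the stripe
 * array, the target device index array and the raid map, hence the
 * summed allocation size below.  __GFP_NOFAIL means this cannot return
 * NULL.
 */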
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	atomic_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!atomic_read(&bbio->refs));
	atomic_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (atomic_dec_and_test(&bbio->refs))
		kfree(bbio);
}

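/*
 * Core mapping routine: translate a logical range into the physical
 * stripes that the given rw operation must touch.  *length is clamped
 * to a single stripe (or to a full stripe set for raid56 writes), and
 * when @bbio_ret is supplied a btrfs_bio describing the stripes is
 * returned, taking mirror selection and a running device replace into
 * account.
 */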
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	u64 stripe_len;
	u32 stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
			logical, *length);
		return -EINVAL;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
			   "found %Lu-%Lu", logical, em->start,
			   em->start + em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	offset = logical - em->start;

	stripe_len = map->stripe_len;
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(stripe_nr, stripe_len);

	stripe_offset = stripe_nr * stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	/* if we're here for raid56, we need to know the stripe aligned start */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;

		/* allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
				full_stripe_len);
		raid56_full_stripe_start *= full_stripe_len;
	}

	if (rw & REQ_DISCARD) {
		/* we don't discard raid56 yet */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		*length = min_t(u64, em->len - offset, *length);
	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/* For writes to RAID[56], allow a full stripeset across all disks.
		   For other RAID types and for RAID[56] reads, just allow a single
		   stripe (on a single disk). */
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
		    (rw & REQ_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
	} else {
		*length = em->len - offset;
	}

	/* This is for when we're called from btrfs_merge_bio_hook() and all
	   it cares about is the length */
	if (!bbio_ret)
		goto out;

	btrfs_dev_replace_lock(dev_replace, 0);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (!dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace, 0);
	else
		btrfs_dev_replace_set_lock_blocking(dev_replace);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
	    dev_replace->tgtdev != NULL) {
		/*
		 * in dev-replace case, for repair case (that's the only
		 * case where the mirror is selected explicitly when
		 * calling btrfs_map_block), blocks left of the left cursor
		 * can also be read from the target drive.
		 * For REQ_GET_READ_MIRRORS, the target drive is added as
		 * the last one to the array of stripes. For READ, it also
		 * needs to be supported using the same mirror number.
		 * If the requested block is not left of the left cursor,
		 * EIO is returned. This can happen because btrfs_num_copies()
		 * returns one more in the dev-replace case.
		 */
		u64 tmp_length = *length;
		struct btrfs_bio *tmp_bbio = NULL;
		int tmp_num_stripes;
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
			     logical, &tmp_length, &tmp_bbio, 0, 0);
		if (ret) {
			WARN_ON(tmp_bbio != NULL);
			goto out;
		}

		tmp_num_stripes = tmp_bbio->num_stripes;
		if (mirror_num > tmp_num_stripes) {
			/*
			 * REQ_GET_READ_MIRRORS does not contain this
			 * mirror, that means that the requested area
			 * is not left of the left cursor
			 */
			ret = -EIO;
			btrfs_put_bbio(tmp_bbio);
			goto out;
		}

		/*
		 * process the rest of the function using the mirror_num
		 * of the source drive. Therefore look it up first.
		 * At the end, patch the device pointer to the one of the
		 * target drive.
		 */
		for (i = 0; i < tmp_num_stripes; i++) {
			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
				continue;

			/*
			 * In case of DUP, in order to keep it simple, only add
			 * the mirror with the lowest physical address
			 */
			if (found &&
			    physical_of_found <= tmp_bbio->stripes[i].physical)
				continue;

			index_srcdev = i;
			found = 1;
			physical_of_found = tmp_bbio->stripes[i].physical;
		}

		btrfs_put_bbio(tmp_bbio);

		if (!found) {
			WARN_ON(1);
			ret = -EIO;
			goto out;
		}

		mirror_num = index_srcdev + 1;
		patch_the_first_stripe_for_dev_replace = 1;
		physical_to_patch_in_first_stripe = physical_of_found;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);

	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;
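		/*
		 * Worked example (geometry assumed): 4 stripes with
		 * sub_stripes = 2 give factor = 2 mirror pairs.  For
		 * stripe_nr = 5 the division leaves stripe_nr = 2 with
		 * remainder 1, so stripe_index = 1 * 2 = 2: the data sits
		 * at device-local stripe 2 on the pair formed by stripes
		 * 2 and 3.
		 */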

		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map &&
		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
		     mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!(rw & (REQ_WRITE | REQ_DISCARD |
				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_alloc_stripes <<= 1;
		if (rw & REQ_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
	    mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}
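	/*
	 * Rotation example (geometry assumed): RAID5 with num_stripes = 3
	 * (two data stripes plus P).  For stripe_nr = 4, rot = 4 % 3 = 1,
	 * so data stripe 0 lands in slot 1, data stripe 1 in slot 2, and
	 * the P stripe wraps around to slot 0: the parity rotates by one
	 * device per stripe-set, which sort_parity_stripes() below relies
	 * on.
	 */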

	if (rw & REQ_DISCARD) {
		u32 factor = 0;
		u32 sub_stripes = 0;
		u64 stripes_per_dev = 0;
		u32 remaining_stripes = 0;
		u32 last_stripe = 0;

		if (map->type &
		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
				sub_stripes = 1;
			else
				sub_stripes = map->sub_stripes;

			factor = map->num_stripes / sub_stripes;
			stripes_per_dev = div_u64_rem(stripe_nr_end -
						      stripe_nr_orig,
						      factor,
						      &remaining_stripes);
			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
			last_stripe *= sub_stripes;
		}

		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;

				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;

				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
					bbio->stripes[i].length -=
						stripe_end_offset;

				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}
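	/*
	 * Discard trimming example (values assumed): RAID0 over 2 devices
	 * with 64K stripes, discarding from 16K into stripe 2 up to 48K
	 * into stripe 5.  stripes_per_dev = (6 - 2) / 2 = 2, so each device
	 * starts at 128K; the stripe holding the start loses stripe_offset
	 * (16K) and the one holding the end loses stripe_end_offset (16K),
	 * leaving 112K per device, 224K total - exactly the requested range.
	 */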

	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	tgtdev_indexes = 0;
	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
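		/*
		 * Hypothetical example: a RAID1 write maps to two stripes,
		 * one of which sits on the source device; a third stripe
		 * pointing at the target device is appended above, so
		 * num_stripes goes from 2 to 3 and max_errors is bumped by
		 * one for the extra copy.
		 */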
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			if (physical_of_found + map->stripe_len <=
			    dev_replace->cursor_left) {
				struct btrfs_bio_stripe *tgtdev_stripe =
					bbio->stripes + num_stripes;

				tgtdev_stripe->physical = physical_of_found;
				tgtdev_stripe->length =
					bbio->stripes[index_srcdev].length;
				tgtdev_stripe->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[index_srcdev] = num_stripes;

				tgtdev_indexes++;
				num_stripes++;
			}
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;
	bbio->num_tgtdevs = tgtdev_indexes;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		btrfs_dev_replace_clear_lock_blocking(dev_replace);
		btrfs_dev_replace_unlock(dev_replace, 0);
	}
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret, int mirror_num,
		     int need_raid_map)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, need_raid_map);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
		       chunk_start);
		return -EIO;
	}

	if (em->start != chunk_start) {
		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
	map = em->map_lookup;

	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
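		/*
		 * Reverse-mapping example (geometry assumed): RAID0 over 2
		 * devices with 64K stripes has rmap_len = 64K.  A physical
		 * address 3 stripes into device i = 1 gives stripe_nr =
		 * 3 * 2 + 1 = 7, hence bytenr = chunk_start + 7 * 64K.
		 */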
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_error) {
		atomic_inc(&bbio->error);
		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_error = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_error = 0;
		}
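		/*
		 * Example: a RAID1 write has num_stripes = 2 and
		 * max_errors = 1, so losing one mirror still completes the
		 * bio successfully; only a second failure propagates -EIO.
		 */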

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (device->missing || !device->bdev) {
		bio_io_error(bio);
		return;
	}

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(root->fs_info->submit_workers,
				 &device->work);
}

static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_iter.bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;

	btrfs_bio_counter_inc_noblocked(root->fs_info);

	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		bio->bi_error = -EIO;
		btrfs_end_bbio(bbio, bio);
	}
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(root->fs_info);
	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
			      mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = root->fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
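	/*
	 * stripes_pending example: with total_devs = 3 the loop below
	 * submits first_bio plus two clones; every completion decrements
	 * stripes_pending and only the last one to finish runs the end_io
	 * logic in btrfs_end_bio().
	 */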

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((rw & WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (rw & WRITE) {
			ret = raid56_parity_write(root, bio, bbio, map_length);
		} else {
			ret = raid56_parity_recover(root, bio, bbio, map_length,
						    mirror_num, 1);
		}

		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	if (map_length < length) {
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
			logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else
			bio = first_bio;

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
	}
	btrfs_bio_counter_dec(root->fs_info);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 stripe_len;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	if (!num_stripes) {
		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, root->sectorsize)) {
		btrfs_err(root->fs_info,
			  "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
		btrfs_err(root->fs_info,
			"invalid chunk length %llu", length);
		return -EIO;
	}
	if (!is_power_of_2(stripe_len)) {
		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    btrfs_chunk_type(leaf, chunk)) {
		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, root->fs_info->fs_devices,
						devid, uuid);
			if (!map->stripes[i].dev) {
				free_extent_map(em);
				return -EIO;
			}
			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
						devid, uuid);
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		if (!btrfs_test_opt(root, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		fs_devices = open_seed_devices(root, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
				devid, dev_uuid);
	} else {
		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device->bdev && !device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			device->missing = 1;
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(device->missing);

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
	/*
	 * This will create an extent buffer of nodesize; the superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate, but we can keep it as-is; only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * set_extent_buffer_uptodate() call does not properly mark all its
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(KERN_ERR
	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

6557 6558 6559 6560
			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;
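			/*
			 * Size note: btrfs_chunk_item_size(n) is
			 * sizeof(struct btrfs_chunk) plus (n - 1) stripe
			 * entries, because struct btrfs_chunk already embeds
			 * one struct btrfs_stripe; a two-stripe chunk is one
			 * stripe entry larger than the minimum checked above.
			 */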

			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
		} else {
			printk(KERN_ERR
		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	free_extent_buffer(sb);
	return ret;

out_short_read:
	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	free_extent_buffer(sb);
	return -EIO;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6608 6609 6610 6611 6612
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->dev_root = fs_info->dev_root;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}
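		/*
		 * Compatibility example: an item carrying only four
		 * counters (item_size = 32) fills indexes 0-3 from disk and
		 * resets the rest, so stats items written with fewer values
		 * still load cleanly.
		 */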

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(dev_root->fs_info,
			"error %d while searching for dev_stats item for device %s",
6757
			      ret, rcu_str_deref(device->name));
6758 6759 6760 6761 6762 6763 6764 6765
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
6766 6767
			btrfs_warn_in_rcu(dev_root->fs_info,
				"delete too small dev_stats item for device %s failed %d",
6768
				      rcu_str_deref(device->name), ret);
6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
6780 6781 6782
			btrfs_warn_in_rcu(dev_root->fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
6808
	int stats_cnt;
6809 6810 6811 6812
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6813
		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6814 6815
			continue;

6816
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6817 6818
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
6819
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6820 6821 6822 6823 6824 6825
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

6826 6827 6828 6829 6830 6831
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

6832
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6833
{
6834 6835
	if (!dev->dev_stats_valid)
		return;
6836 6837
	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6838
			   rcu_str_deref(dev->name),
6839 6840 6841
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6842 6843
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6844
}
6845

6846 6847
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
6848 6849 6850 6851 6852 6853 6854 6855
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

6856 6857
	btrfs_info_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6858
	       rcu_str_deref(dev->name),
6859 6860 6861 6862 6863 6864 6865
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	lock_chunks(fs_info->dev_root);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	unlock_chunks(fs_info->dev_root);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
					struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	lock_chunks(root);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	unlock_chunks(root);
}

void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device);
}