/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

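/*
 * Constraints for each RAID profile, used by the chunk allocator and by
 * balance: sub_stripes (stripes per copy, 2 for RAID10), dev_stripes
 * (stripes placed on a single device, 2 for DUP), devs_max/devs_min
 * (upper and lower bounds on the number of devices, 0 == as many as
 * possible), tolerated_failures (devices that may be lost without losing
 * data), devs_increment (devices are consumed in multiples of this) and
 * ncopies (copies of each block in the profile).
 */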
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
	},
};

const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static void btrfs_close_one_device(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

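/*
 * Allocate an empty btrfs_device and initialize its lists, locks and
 * readahead radix trees.  The caller fills in the devid, uuid and name.
 */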
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
306 307
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
308 309 310 311 312 313 314 315 316 317 318 319
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

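/*
 * Put the bios between @head and @tail back onto the device's pending
 * list, in front of whatever is still queued there.
 */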
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
346 347 348
{
	struct bio *pending;
	struct backing_dev_info *bdi;
349
	struct btrfs_fs_info *fs_info;
350
	struct btrfs_pending_bios *pending_bios;
351 352 353
	struct bio *tail;
	struct bio *cur;
	int again = 0;
354
	unsigned long num_run;
355
	unsigned long batch_run = 0;
356
	unsigned long limit;
357
	unsigned long last_waited = 0;
358
	int force_reg = 0;
M
Miao Xie 已提交
359
	int sync_pending = 0;
360 361 362 363 364 365 366 367 368
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);
369

370
	bdi = blk_get_backing_dev_info(device->bdev);
371 372 373 374
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

375 376 377
loop:
	spin_lock(&device->io_lock);

378
loop_lock:
379
	num_run = 0;
380

381 382 383 384 385
	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
386
	if (!force_reg && device->pending_sync_bios.head) {
387
		pending_bios = &device->pending_sync_bios;
388 389
		force_reg = 1;
	} else {
390
		pending_bios = &device->pending_bios;
391 392
		force_reg = 0;
	}
393 394 395

	pending = pending_bios->head;
	tail = pending_bios->tail;
396 397 398 399 400 401 402 403 404 405
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
406 407
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
408 409
		again = 0;
		device->running_pending = 0;
410 411 412
	} else {
		again = 1;
		device->running_pending = 1;
413
	}
414 415 416 417

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

418 419
	spin_unlock(&device->io_lock);

C
Chris Mason 已提交
420
	while (pending) {
421 422

		rmb();
423 424 425 426 427 428 429 430
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
431 432 433 434 435
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

436 437 438
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
439

440 441 442
		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
443
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
444 445
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);
446

447
		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
448

449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464
		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

465
		btrfsic_submit_bio(cur->bi_rw, cur);
466 467
		num_run++;
		batch_run++;
468 469

		cond_resched();
470 471 472 473 474 475

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
C
Chris Mason 已提交
476
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
477
		    fs_info->fs_devices->open_devices > 1) {
478
			struct io_context *ioc;
479

480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501
			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
502
				cond_resched();
503 504
				continue;
			}
505
			spin_lock(&device->io_lock);
506
			requeue_list(pending_bios, pending, tail);
507
			device->running_pending = 1;
508 509

			spin_unlock(&device->io_lock);
510 511
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
512 513
			goto done;
		}
C
Chris Mason 已提交
514 515 516 517 518 519
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
520
	}
521

522 523 524 525 526 527 528 529 530
	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

531
done:
532
	blk_finish_plug(&plug);
533 534
}

535
static void pending_bios_fn(struct btrfs_work *work)
536 537 538 539 540 541 542
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}


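/*
 * Drop stale entries for @cur_dev's path from the list of registered
 * devices: if another unmounted, non-seed fs_devices still has a device
 * registered under the same path, remove that entry (and the whole
 * fs_devices when it was its only member).
 */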
void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * TODO: This won't be enough. What if the same device
			 * comes back (with a new uuid) under its mapper path?
			 * But for now this does help, as an admin will mostly
			 * use either the mapper or the non-mapper path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
						rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
606
static noinline int device_list_add(const char *path,
607 608 609 610 611
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
612
	struct rcu_string *name;
613
	int ret = 0;
614 615 616 617
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
618 619 620 621
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

622
		list_add(&fs_devices->list, &fs_uuids);
623

624 625
		device = NULL;
	} else {
626 627
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
628
	}
629

630
	if (!device) {
Y
Yan Zheng 已提交
631 632 633
		if (fs_devices->opened)
			return -EBUSY;

634 635 636
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
637
			/* we can safely leave the fs_devices entry around */
638
			return PTR_ERR(device);
639
		}
640 641 642

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
643 644 645
			kfree(device);
			return -ENOMEM;
		}
646
		rcu_assign_pointer(device->name, name);
647

648
		mutex_lock(&fs_devices->device_list_mutex);
649
		list_add_rcu(&device->dev_list, &fs_devices->devices);
650
		fs_devices->num_devices++;
651 652
		mutex_unlock(&fs_devices->device_list_mutex);

653
		ret = 1;
Y
Yan Zheng 已提交
654
		device->fs_devices = fs_devices;
655
	} else if (!device->name || strcmp(device->name->str, path)) {
656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         a different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above, but 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
677 678 679 680
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
681
		 */
682
		if (!fs_devices->opened && found_transid < device->generation) {
683 684 685 686 687 688 689
			/*
			 * That is, if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the
			 * one with the larger generation number or the
			 * last-in if the generations are equal.
			 */
690
			return -EEXIST;
691
		}
692

693
		name = rcu_string_strdup(path, GFP_NOFS);
694 695
		if (!name)
			return -ENOMEM;
696 697
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
698 699 700 701
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
702 703
	}

704 705 706 707 708 709 710 711 712
	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

A
Anand Jain 已提交
713 714 715 716
	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
717 718
	if (ret > 0)
		btrfs_free_stale_device(device);
A
Anand Jain 已提交
719

720
	*fs_devices_ret = fs_devices;
721 722

	return ret;
723 724
}

Y
Yan Zheng 已提交
725 726 727 728 729 730
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

731 732 733
	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;
Y
Yan Zheng 已提交
734

735
	mutex_lock(&orig->device_list_mutex);
J
Josef Bacik 已提交
736
	fs_devices->total_devices = orig->total_devices;
Y
Yan Zheng 已提交
737

738
	/* We hold the volume lock, so it is safe to get the devices. */
Y
Yan Zheng 已提交
739
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
740 741
		struct rcu_string *name;

742 743 744
		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
Y
Yan Zheng 已提交
745 746
			goto error;

747 748 749 750
		/*
		 * This is ok to do without the RCU read lock held because we
		 * hold the uuid_mutex, so nothing we touch in here is going
		 * to disappear.
		 */
751
		if (orig_dev->name) {
752 753
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
754 755 756 757 758
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
J
Julia Lawall 已提交
759
		}
Y
Yan Zheng 已提交
760 761 762 763 764

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
765
	mutex_unlock(&orig->device_list_mutex);
Y
Yan Zheng 已提交
766 767
	return fs_devices;
error:
768
	mutex_unlock(&orig->device_list_mutex);
Y
Yan Zheng 已提交
769 770 771 772
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

773
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
774
{
Q
Qinghuang Feng 已提交
775
	struct btrfs_device *device, *next;
776
	struct btrfs_device *latest_dev = NULL;
777

778 779
	mutex_lock(&uuid_mutex);
again:
780
	/* This is the initialization path, so it is safe to release the devices. */
Q
Qinghuang Feng 已提交
781
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
782
		if (device->in_fs_metadata) {
783
			if (!device->is_tgtdev_for_dev_replace &&
784 785 786
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
787
			}
Y
Yan Zheng 已提交
788
			continue;
789
		}
Y
Yan Zheng 已提交
790

791 792 793 794 795 796 797 798 799 800 801 802 803 804 805
		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
Y
Yan Zheng 已提交
806
		if (device->bdev) {
807
			blkdev_put(device->bdev, device->mode);
Y
Yan Zheng 已提交
808 809 810 811 812 813
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
814 815
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
Y
Yan Zheng 已提交
816
		}
Y
Yan Zheng 已提交
817 818
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
819
		rcu_string_free(device->name);
Y
Yan Zheng 已提交
820
		kfree(device);
821
	}
Y
Yan Zheng 已提交
822 823 824 825 826 827

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

828
	fs_devices->latest_bdev = latest_dev->bdev;
829

830 831
	mutex_unlock(&uuid_mutex);
}
832

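/*
 * Final teardown of a device, run from a workqueue (scheduled by the RCU
 * callback below) so that blkdev_put() is not called in RCU context.
 */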
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

842
	rcu_string_free(device->name);
843 844 845 846 847 848 849 850 851 852 853 854 855
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

Y
Yan Zheng 已提交
856
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
857
{
858
	struct btrfs_device *device, *tmp;
Y
Yan Zheng 已提交
859

Y
Yan Zheng 已提交
860 861
	if (--fs_devices->opened > 0)
		return 0;
862

863
	mutex_lock(&fs_devices->device_list_mutex);
864
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
865
		btrfs_close_one_device(device);
866
	}
867 868
	mutex_unlock(&fs_devices->device_list_mutex);

Y
Yan Zheng 已提交
869 870
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
Y
Yan Zheng 已提交
871 872 873
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

874 875 876
	return 0;
}

Y
Yan Zheng 已提交
877 878
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
Y
Yan Zheng 已提交
879
	struct btrfs_fs_devices *seed_devices = NULL;
Y
Yan Zheng 已提交
880 881 882 883
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
Y
Yan Zheng 已提交
884 885 886 887
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
Y
Yan Zheng 已提交
888
	mutex_unlock(&uuid_mutex);
Y
Yan Zheng 已提交
889 890 891 892 893 894 895

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
896 897 898 899 900 901
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
Y
Yan Zheng 已提交
902 903 904
	return ret;
}

Y
Yan Zheng 已提交
905 906
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
907
{
908
	struct request_queue *q;
909 910 911
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
912
	struct btrfs_device *latest_dev = NULL;
913 914 915
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
Y
Yan Zheng 已提交
916
	int seeding = 1;
917
	int ret = 0;
918

919 920
	flags |= FMODE_EXCL;

Q
Qinghuang Feng 已提交
921
	list_for_each_entry(device, head, dev_list) {
922 923
		if (device->bdev)
			continue;
924 925 926
		if (!device->name)
			continue;

927 928 929
		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
930
			continue;
931 932

		disk_super = (struct btrfs_super_block *)bh->b_data;
933
		devid = btrfs_stack_device_id(&disk_super->dev_item);
934 935 936
		if (devid != device->devid)
			goto error_brelse;

Y
Yan Zheng 已提交
937 938 939 940 941
		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
942 943 944
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
945

Y
Yan Zheng 已提交
946 947 948 949 950 951 952
		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

953
		q = bdev_get_queue(bdev);
954
		if (blk_queue_discard(q))
955 956
			device->can_discard = 1;

957
		device->bdev = bdev;
958
		device->in_fs_metadata = 0;
959 960
		device->mode = flags;

C
Chris Mason 已提交
961 962 963
		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

964
		fs_devices->open_devices++;
965 966
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
Y
Yan Zheng 已提交
967 968 969 970
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
971
		brelse(bh);
972
		continue;
973

974 975
error_brelse:
		brelse(bh);
976
		blkdev_put(bdev, flags);
977
		continue;
978
	}
979
	if (fs_devices->open_devices == 0) {
980
		ret = -EINVAL;
981 982
		goto out;
	}
Y
Yan Zheng 已提交
983 984
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
985
	fs_devices->latest_bdev = latest_dev->bdev;
Y
Yan Zheng 已提交
986
	fs_devices->total_rw_bytes = 0;
987
out:
Y
Yan Zheng 已提交
988 989 990 991
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
992
		       fmode_t flags, void *holder)
Y
Yan Zheng 已提交
993 994 995 996 997
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
Y
Yan Zheng 已提交
998 999
		fs_devices->opened++;
		ret = 0;
Y
Yan Zheng 已提交
1000
	} else {
1001
		ret = __btrfs_open_devices(fs_devices, flags, holder);
Y
Yan Zheng 已提交
1002
	}
1003 1004 1005 1006
	mutex_unlock(&uuid_mutex);
	return ret;
}

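/*
 * Helpers for reading a superblock through the page cache during device
 * scan: btrfs_read_disk_super() maps the page containing the superblock
 * at @bytenr and performs basic bytenr/magic sanity checks,
 * btrfs_release_disk_super() drops that mapping again.
 */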
void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

1057 1058 1059 1060 1061
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via the pagecache.
 */
1062
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1063 1064 1065 1066
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
1067 1068
	struct page *page;
	int ret = -EINVAL;
1069
	u64 devid;
1070
	u64 transid;
J
Josef Bacik 已提交
1071
	u64 total_devices;
1072
	u64 bytenr;
1073

1074 1075 1076 1077 1078 1079 1080
	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
1081
	flags |= FMODE_EXCL;
1082
	mutex_lock(&uuid_mutex);
1083 1084 1085 1086

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
1087
		goto error;
1088 1089
	}

1090
	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
1091 1092
		goto error_bdev_put;

1093
	devid = btrfs_stack_device_id(&disk_super->dev_item);
1094
	transid = btrfs_super_generation(disk_super);
J
Josef Bacik 已提交
1095
	total_devices = btrfs_super_num_devices(disk_super);
1096

1097
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
	if (ret > 0) {
		if (disk_super->label[0]) {
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
J
Josef Bacik 已提交
1108 1109
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;
1110

1111
	btrfs_release_disk_super(page);
1112 1113

error_bdev_put:
1114
	blkdev_put(bdev, flags);
1115
error:
1116
	mutex_unlock(&uuid_mutex);
1117 1118
	return ret;
}
1119

1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

1135
	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
1136 1137 1138 1139 1140
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
1141
	path->reada = READA_FORWARD;
1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

1176
		if (key.type != BTRFS_DEV_EXTENT_KEY)
1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

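/*
 * Check whether the range starting at *start with length @len on @device
 * overlaps a chunk that is still pending or pinned (not yet committed to
 * the device tree).  If so, advance *start past the conflicting stripe
 * and return 1, otherwise return 0.
 */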
static int contains_pending_extent(struct btrfs_transaction *transaction,
1205 1206 1207
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
1208
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
1209
	struct extent_map *em;
1210
	struct list_head *search_list = &fs_info->pinned_chunks;
1211
	int ret = 0;
1212
	u64 physical_start = *start;
1213

1214 1215
	if (transaction)
		search_list = &transaction->pending_chunks;
1216 1217
again:
	list_for_each_entry(em, search_list, list) {
1218 1219 1220
		struct map_lookup *map;
		int i;

1221
		map = em->map_lookup;
1222
		for (i = 0; i < map->num_stripes; i++) {
1223 1224
			u64 end;

1225 1226
			if (map->stripes[i].dev != device)
				continue;
1227
			if (map->stripes[i].physical >= physical_start + len ||
1228
			    map->stripes[i].physical + em->orig_block_len <=
1229
			    physical_start)
1230
				continue;
1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
1248 1249
		}
	}
1250 1251
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
1252 1253
		goto again;
	}
1254 1255 1256 1257 1258

	return ret;
}


1259
/*
1260 1261 1262 1263 1264 1265 1266
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
1267
 *
1268 1269 1270
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
1271 1272 1273 1274 1275 1276 1277 1278
 *
 * @start is used to store the start of the free space if we find it. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
1279
 */
1280 1281 1282
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
1283 1284 1285
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
1286
	struct btrfs_dev_extent *dev_extent;
Y
Yan Zheng 已提交
1287
	struct btrfs_path *path;
1288 1289 1290 1291
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
1292 1293
	u64 search_end = device->total_bytes;
	int ret;
1294
	int slot;
1295
	struct extent_buffer *l;
1296 1297 1298 1299 1300 1301 1302 1303 1304
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);
1305

1306 1307 1308
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
1309

1310 1311 1312
	max_hole_start = search_start;
	max_hole_size = 0;

1313
again:
1314
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1315
		ret = -ENOSPC;
1316
		goto out;
1317 1318
	}

1319
	path->reada = READA_FORWARD;
1320 1321
	path->search_commit_root = 1;
	path->skip_locking = 1;
1322

1323 1324 1325
	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
1326

1327
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1328
	if (ret < 0)
1329
		goto out;
1330 1331 1332
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
1333
			goto out;
1334
	}
1335

1336 1337 1338 1339 1340 1341 1342 1343
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
1344 1345 1346
				goto out;

			break;
1347 1348 1349 1350 1351 1352 1353
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
1354
			break;
1355

1356
		if (key.type != BTRFS_DEV_EXTENT_KEY)
1357
			goto next;
1358

1359 1360
		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
1361

1362 1363 1364 1365
			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
1366
			if (contains_pending_extent(transaction, device,
1367
						    &search_start,
1368 1369 1370 1371 1372 1373 1374 1375
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}
1376

1377 1378 1379 1380
			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}
1381

1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393
			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
1394 1395 1396 1397
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1398 1399 1400 1401
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
1402 1403 1404 1405 1406
next:
		path->slots[0]++;
		cond_resched();
	}

1407 1408 1409 1410 1411
	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
1412
	if (search_end > search_start) {
1413 1414
		hole_size = search_end - search_start;

1415
		if (contains_pending_extent(transaction, device, &search_start,
1416 1417 1418 1419
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}
1420

1421 1422 1423 1424
		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
1425 1426
	}

1427
	/* See above. */
1428
	if (max_hole_size < num_bytes)
1429 1430 1431 1432 1433
		ret = -ENOSPC;
	else
		ret = 0;

out:
Y
Yan Zheng 已提交
1434
	btrfs_free_path(path);
1435
	*start = max_hole_start;
1436
	if (len)
1437
		*len = max_hole_size;
1438 1439 1440
	return ret;
}

1441 1442 1443 1444 1445 1446
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
1447
					  num_bytes, 0, start, len);
1448 1449
}

1450
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1451
			  struct btrfs_device *device,
M
Miao Xie 已提交
1452
			  u64 start, u64 *dev_extent_len)
1453 1454 1455 1456 1457
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
1458 1459 1460
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;
1461 1462 1463 1464 1465 1466 1467 1468

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
M
Miao Xie 已提交
1469
again:
1470
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1471 1472 1473
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
1474 1475
		if (ret)
			goto out;
1476 1477 1478 1479 1480 1481
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
M
Miao Xie 已提交
1482 1483 1484
		key = found_key;
		btrfs_release_path(path);
		goto again;
1485 1486 1487 1488
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
1489
	} else {
1490
		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
1491
		goto out;
1492
	}
1493

M
Miao Xie 已提交
1494 1495
	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

1496
	ret = btrfs_del_item(trans, root, path);
1497
	if (ret) {
1498
		btrfs_handle_fs_error(root->fs_info, ret,
1499
			    "Failed to remove dev extent item");
Z
Zhao Lei 已提交
1500
	} else {
1501
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1502
	}
1503
out:
1504 1505 1506 1507
	btrfs_free_path(path);
	return ret;
}

1508 1509 1510 1511
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
1512 1513 1514 1515 1516 1517 1518 1519
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

1520
	WARN_ON(!device->in_fs_metadata);
1521
	WARN_ON(device->is_tgtdev_for_dev_replace);
1522 1523 1524 1525 1526
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
Y
Yan Zheng 已提交
1527
	key.offset = start;
1528 1529 1530
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
1531 1532
	if (ret)
		goto out;
1533 1534 1535 1536

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
1537 1538 1539 1540 1541
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1542
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1543

1544 1545
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
1546
out:
1547 1548 1549 1550
	btrfs_free_path(path);
	return ret;
}

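/*
 * Return the logical offset at which the next chunk can be placed: the
 * end of the last extent map in the chunk mapping tree, or 0 if there
 * are no chunks yet.
 */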
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1552
{
1553 1554 1555 1556
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;
1557

1558 1559 1560 1561 1562 1563
	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
1564
	}
1565 1566
	read_unlock(&em_tree->lock);

1567 1568 1569
	return ret;
}

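/*
 * Find the next available devid by looking up the last DEV_ITEM in the
 * chunk tree and adding one to its offset; an empty tree yields devid 1.
 */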
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
1572 1573 1574 1575
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
Y
Yan Zheng 已提交
1576 1577 1578 1579 1580
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
1581 1582 1583 1584 1585

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

1586
	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1587 1588 1589
	if (ret < 0)
		goto error;

1590
	BUG_ON(ret == 0); /* Corruption */
1591

1592 1593
	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
1594 1595
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
1596
		*devid_ret = 1;
1597 1598 1599
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
1600
		*devid_ret = found_key.offset + 1;
1601 1602 1603
	}
	ret = 0;
error:
Y
Yan Zheng 已提交
1604
	btrfs_free_path(path);
1605 1606 1607 1608 1609 1610 1611
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
1612 1613 1614
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
Y
Yan Zheng 已提交
1631
	key.offset = device->devid;
1632 1633

	ret = btrfs_insert_empty_item(trans, root, path, &key,
1634
				      sizeof(*dev_item));
1635 1636 1637 1638 1639 1640 1641
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
Y
Yan Zheng 已提交
1642
	btrfs_set_device_generation(leaf, dev_item, 0);
1643 1644 1645 1646
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1647 1648 1649 1650
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
1651 1652 1653
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1654
	btrfs_set_device_start_offset(leaf, dev_item, 0);
1655

1656
	ptr = btrfs_device_uuid(dev_item);
1657
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1658
	ptr = btrfs_device_fsid(dev_item);
Y
Yan Zheng 已提交
1659
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1660 1661
	btrfs_mark_buffer_dirty(leaf);

Y
Yan Zheng 已提交
1662
	ret = 0;
1663 1664 1665 1666
out:
	btrfs_free_path(path);
	return ret;
}
1667

1668 1669 1670 1671 1672 1673 1674 1675 1676
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
1677
	if (IS_ERR(filp))
1678 1679 1680 1681 1682
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

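/*
 * Delete the DEV_ITEM for @device from the chunk tree, using a
 * transaction that is started and committed here.
 */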
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

1697
	trans = btrfs_start_transaction(root, 0);
1698 1699 1700 1701
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

1724 1725 1726 1727 1728 1729 1730
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
1731 1732
{
	u64 all_avail;
1733
	unsigned seq;
1734
	int i;
1735

1736
	do {
1737
		seq = read_seqbegin(&fs_info->profiles_lock);
1738

1739 1740 1741 1742
		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));
1743

1744 1745 1746
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;
1747

1748 1749
		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];
1750

1751 1752 1753
			if (ret)
				return ret;
		}
D
David Woodhouse 已提交
1754 1755
	}

1756
	return 0;
1757 1758
}

1759 1760
struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
					struct btrfs_device *device)
1761
{
Y
Yan Zheng 已提交
1762
	struct btrfs_device *next_device;
1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
			!next_device->missing && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;
	char *dev_name = NULL;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(root, devid, device_path,
				&device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		dev_name = kstrdup(device->name->str, GFP_KERNEL);
		if (!dev_name) {
			ret = -ENOMEM;
			goto error_undo;
		}
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	btrfs_assign_next_active_device(root->fs_info, device, NULL);

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	}

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		struct block_device *bdev;

		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
						root->fs_info->bdev_holder);
		if (!IS_ERR(bdev)) {
			btrfs_scratch_superblocks(bdev, dev_name);
			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
		}
	}

out:
	kfree(dev_name);

	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto out;
}

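/*
 * Detach the replace source device from its fs_devices lists and adjust the
 * device counters. The caller must hold the device_list_mutex.
 */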
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable)
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

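/*
 * Free the replace source device once it is detached: scratch its superblocks
 * if it was writable, RCU-free it and tear down its fs_devices if no devices
 * are left (the seed device case).
 */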
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (srcdev->writeable) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}
	call_rcu(&srcdev->rcu, free_device);

	/*
	 * unless fs_devices belongs to a seed filesystem, num_devices
	 * shouldn't go to zero
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devices left, delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

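/*
 * Remove and free the dev-replace target device, dropping it from the device
 * lists and wiping its superblock copies.
 */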
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	mutex_lock(&uuid_mutex);
	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_info->fs_devices->open_devices--;

	fs_info->fs_devices->num_devices--;

	btrfs_assign_next_active_device(fs_info, tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks() may
	 * lead to a call to btrfs_show_devname() which will try to hold
	 * device_list_mutex. As this device is already out of the device
	 * list, we don't have to hold the device_list_mutex here.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
	call_rcu(&tgtdev->rcu, free_device);
}

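/*
 * Read the superblock from the given path and look up the matching
 * btrfs_device by the devid and uuid stored in its dev item.
 */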
static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

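/*
 * Resolve the special "missing" keyword to the first device that has no
 * backing bdev, otherwise fall back to a lookup by path.
 */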
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device)
			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
					 char *devpath,
					 struct btrfs_device **device)
{
	int ret;

	if (devid) {
		ret = 0;
		*device = btrfs_find_device(root->fs_info, devid, NULL,
					    NULL);
		if (!*device)
			ret = -ENOENT;
	} else {
		if (!devpath || !devpath[0])
			return -EINVAL;

		ret = btrfs_find_device_missing_or_by_path(root, devpath,
							   device);
	}
	return ret;
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = __alloc_fs_devices();
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	lock_chunks(root);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	unlock_chunks(root);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

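/*
 * Add a new device to a mounted filesystem. If the filesystem is a seed
 * filesystem this sprouts it: the fsid is changed and the seed devices are
 * moved to their own btrfs_fs_devices.
 */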
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
2269
	struct request_queue *q;
2270 2271 2272 2273
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
Y
Yan Zheng 已提交
2274
	struct super_block *sb = root->fs_info->sb;
2275
	struct rcu_string *name;
2276
	u64 tmp;
Y
Yan Zheng 已提交
2277
	int seeding_dev = 0;
2278 2279
	int ret = 0;

Y
Yan Zheng 已提交
2280
	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2281
		return -EROFS;
2282

2283
	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2284
				  root->fs_info->bdev_holder);
2285 2286
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
2287

Y
Yan Zheng 已提交
2288 2289 2290 2291 2292 2293
	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

2294
	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2295

2296
	devices = &root->fs_info->fs_devices->devices;
2297 2298

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
Q
Qinghuang Feng 已提交
2299
	list_for_each_entry(device, devices, dev_list) {
2300 2301
		if (device->bdev == bdev) {
			ret = -EEXIST;
2302 2303
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
Y
Yan Zheng 已提交
2304
			goto error;
2305 2306
		}
	}
2307
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2308

2309 2310
	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
	if (IS_ERR(device)) {
2311
		/* we can safely leave the fs_devices entry around */
2312
		ret = PTR_ERR(device);
Y
Yan Zheng 已提交
2313
		goto error;
2314 2315
	}

2316
	name = rcu_string_strdup(device_path, GFP_KERNEL);
2317
	if (!name) {
2318
		kfree(device);
Y
Yan Zheng 已提交
2319 2320
		ret = -ENOMEM;
		goto error;
2321
	}
2322
	rcu_assign_pointer(device->name, name);
Y
Yan Zheng 已提交
2323

2324
	trans = btrfs_start_transaction(root, 0);
2325
	if (IS_ERR(trans)) {
2326
		rcu_string_free(device->name);
2327 2328 2329 2330 2331
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

2332 2333 2334
	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
Y
Yan Zheng 已提交
2335 2336
	device->writeable = 1;
	device->generation = trans->transid;
2337 2338 2339 2340
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
2341
	device->disk_total_bytes = device->total_bytes;
2342
	device->commit_total_bytes = device->total_bytes;
2343 2344
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
2345
	device->in_fs_metadata = 1;
2346
	device->is_tgtdev_for_dev_replace = 0;
2347
	device->mode = FMODE_EXCL;
2348
	device->dev_stats_valid = 1;
Y
Yan Zheng 已提交
2349
	set_blocksize(device->bdev, 4096);
2350

Y
Yan Zheng 已提交
2351 2352
	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
2353
		ret = btrfs_prepare_sprout(root);
2354
		BUG_ON(ret); /* -ENOMEM */
Y
Yan Zheng 已提交
2355
	}
2356

Y
Yan Zheng 已提交
2357
	device->fs_devices = root->fs_info->fs_devices;
2358 2359

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
M
Miao Xie 已提交
2360
	lock_chunks(root);
2361
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
Y
Yan Zheng 已提交
2362 2363 2364 2365 2366
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
J
Josef Bacik 已提交
2367
	root->fs_info->fs_devices->total_devices++;
Y
Yan Zheng 已提交
2368
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2369

2370 2371 2372 2373
	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

C
Chris Mason 已提交
2374 2375 2376
	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

2377
	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2378
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2379
				    tmp + device->total_bytes);
2380

2381
	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2382
	btrfs_set_super_num_devices(root->fs_info->super_copy,
2383
				    tmp + 1);
2384 2385

	/* add sysfs device entry */
2386
	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2387

M
Miao Xie 已提交
2388 2389 2390 2391 2392 2393 2394
	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
2395
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2396

Y
Yan Zheng 已提交
2397
	if (seeding_dev) {
M
Miao Xie 已提交
2398
		lock_chunks(root);
Y
Yan Zheng 已提交
2399
		ret = init_first_rw_device(trans, root, device);
M
Miao Xie 已提交
2400
		unlock_chunks(root);
2401 2402
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
2403
			goto error_trans;
2404
		}
M
Miao Xie 已提交
2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415
	}

	ret = btrfs_add_device(trans, root, device);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto error_trans;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

Y
Yan Zheng 已提交
2416
		ret = btrfs_finish_sprout(trans, root);
2417 2418
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
2419
			goto error_trans;
2420
		}
2421 2422 2423 2424 2425 2426

		/*
		 * Sprouting would change the fsid of the mounted root,
		 * so rename the fsid kobject in sysfs as well.
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
						root->fs_info->fsid);
2427
		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2428
								fsid_buf))
2429 2430
			btrfs_warn(root->fs_info,
				"sysfs: failed to create fsid for sprout");
Y
Yan Zheng 已提交
2431 2432
	}

2433 2434
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2435
	ret = btrfs_commit_transaction(trans, root);
2436

Y
Yan Zheng 已提交
2437 2438 2439
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
2440

2441 2442 2443
		if (ret) /* transaction commit */
			return ret;

Y
Yan Zheng 已提交
2444
		ret = btrfs_relocate_sys_chunks(root);
2445
		if (ret < 0)
2446
			btrfs_handle_fs_error(root->fs_info, ret,
2447 2448 2449
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
2450 2451 2452 2453 2454 2455 2456
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
Y
Yan Zheng 已提交
2457
	}
2458

2459 2460
	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
Y
Yan Zheng 已提交
2461
	return ret;
2462 2463 2464

error_trans:
	btrfs_end_transaction(trans, root);
2465
	rcu_string_free(device->name);
2466
	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2467
	kfree(device);
Y
Yan Zheng 已提交
2468
error:
2469
	blkdev_put(bdev, FMODE_EXCL);
Y
Yan Zheng 已提交
2470 2471 2472 2473
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
2474
	return ret;
2475 2476
}

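/*
 * Allocate and open the target device for dev-replace. The new device copies
 * its size fields from the source device and is flagged as a replace target.
 */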
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2478
				  struct btrfs_device *srcdev,
2479 2480 2481 2482 2483 2484 2485 2486
				  struct btrfs_device **device_out)
{
	struct request_queue *q;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *devices;
	struct rcu_string *name;
2487
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2488 2489 2490
	int ret = 0;

	*device_out = NULL;
2491 2492
	if (fs_info->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2493
		return -EINVAL;
2494
	}
2495 2496 2497

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
2498 2499
	if (IS_ERR(bdev)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2500
		return PTR_ERR(bdev);
2501
	}
2502 2503 2504 2505 2506 2507

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
2508
			btrfs_err(fs_info, "target device is in the filesystem!");
2509 2510 2511 2512 2513
			ret = -EEXIST;
			goto error;
		}
	}

2514

2515 2516
	if (i_size_read(bdev->bd_inode) <
	    btrfs_device_get_total_bytes(srcdev)) {
2517 2518 2519 2520 2521 2522
		btrfs_err(fs_info, "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}


2523 2524 2525
	device = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	device->writeable = 1;
	device->generation = 0;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
2546 2547 2548
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2549 2550
	ASSERT(list_empty(&srcdev->resized_list));
	device->commit_total_bytes = srcdev->commit_total_bytes;
2551
	device->commit_bytes_used = device->bytes_used;
2552 2553 2554 2555 2556
	device->dev_root = fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 1;
	device->mode = FMODE_EXCL;
2557
	device->dev_stats_valid = 1;
2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583
	set_blocksize(device->bdev, 4096);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}

void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
					      struct btrfs_device *tgtdev)
{
	WARN_ON(fs_info->fs_devices->rw_devices == 0);
	tgtdev->io_width = fs_info->dev_root->sectorsize;
	tgtdev->io_align = fs_info->dev_root->sectorsize;
	tgtdev->sector_size = fs_info->dev_root->sectorsize;
	tgtdev->dev_root = fs_info->dev_root;
	tgtdev->in_fs_metadata = 1;
}

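/*
 * Write the in-memory state of a device back into its dev item in the
 * chunk tree.
 */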
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2621 2622 2623 2624
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
2625 2626 2627 2628 2629 2630 2631
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

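/*
 * Grow a device to new_size: update the superblock total, the in-memory
 * sizes and the dev item. Must be called within a transaction.
 */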
int btrfs_grow_device(struct btrfs_trans_handle *trans,
2633 2634 2635
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
2636
		device->dev_root->fs_info->super_copy;
2637
	struct btrfs_fs_devices *fs_devices;
M
Miao Xie 已提交
2638 2639
	u64 old_total;
	u64 diff;
2640

Y
Yan Zheng 已提交
2641 2642
	if (!device->writeable)
		return -EACCES;
M
Miao Xie 已提交
2643 2644 2645 2646 2647

	lock_chunks(device->dev_root);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = new_size - device->total_bytes;

2648
	if (new_size <= device->total_bytes ||
M
Miao Xie 已提交
2649 2650
	    device->is_tgtdev_for_dev_replace) {
		unlock_chunks(device->dev_root);
Y
Yan Zheng 已提交
2651
		return -EINVAL;
M
Miao Xie 已提交
2652
	}
Y
Yan Zheng 已提交
2653

2654
	fs_devices = device->dev_root->fs_info->fs_devices;
Y
Yan Zheng 已提交
2655

2656
	btrfs_set_super_total_bytes(super_copy, old_total + diff);
Y
Yan Zheng 已提交
2657 2658
	device->fs_devices->total_rw_bytes += diff;

2659 2660
	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
2661
	btrfs_clear_space_info_full(device->dev_root->fs_info);
2662 2663 2664
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_devices->resized_devices);
M
Miao Xie 已提交
2665
	unlock_chunks(device->dev_root);
2666

2667 2668 2669 2670
	return btrfs_update_device(trans, device);
}

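/* Delete the chunk item for the given chunk offset from the chunk tree. */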
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2671
			    struct btrfs_root *root, u64 chunk_objectid,
2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2688 2689 2690
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
2691
		btrfs_handle_fs_error(root->fs_info, -ENOENT,
2692 2693 2694 2695
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}
2696 2697

	ret = btrfs_del_item(trans, root, path);
2698
	if (ret < 0)
2699
		btrfs_handle_fs_error(root->fs_info, ret,
2700 2701
			    "Failed to delete chunk item.");
out:
2702
	btrfs_free_path(path);
2703
	return ret;
2704 2705
}

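/*
 * Remove the given chunk from the superblock's sys_chunk_array, shifting the
 * remaining entries down.
 */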
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2707 2708
			chunk_offset)
{
2709
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2710 2711 2712 2713 2714 2715 2716 2717 2718 2719
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

M
Miao Xie 已提交
2720
	lock_chunks(root);
2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
M
Miao Xie 已提交
2750
	unlock_chunks(root);
2751 2752 2753
	return ret;
}

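/*
 * Remove a chunk: free its dev extents, delete the chunk item (and the
 * sys_chunk_array entry for system chunks) and remove the block group.
 */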
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, u64 chunk_offset)
2756 2757 2758
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
2759
	struct btrfs_root *extent_root = root->fs_info->extent_root;
2760
	struct map_lookup *map;
M
Miao Xie 已提交
2761
	u64 dev_extent_len = 0;
2762 2763
	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	int i, ret = 0;
2764
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2765

2766
	/* Just in case */
2767 2768 2769
	root = root->fs_info->chunk_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

2770
	read_lock(&em_tree->lock);
2771
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2772
	read_unlock(&em_tree->lock);
2773

2774 2775 2776 2777
	if (!em || em->start > chunk_offset ||
	    em->start + em->len < chunk_offset) {
		/*
		 * This is a logic error, but we don't want to just rely on the
2778
		 * user having built with ASSERT enabled, so if ASSERT doesn't
2779 2780 2781 2782 2783 2784 2785
		 * do anything we still error out.
		 */
		ASSERT(0);
		if (em)
			free_extent_map(em);
		return -EINVAL;
	}
2786
	map = em->map_lookup;
2787
	lock_chunks(root->fs_info->chunk_root);
2788
	check_system_chunk(trans, extent_root, map->type);
2789
	unlock_chunks(root->fs_info->chunk_root);
2790

2791 2792 2793 2794 2795 2796
	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
2797
	for (i = 0; i < map->num_stripes; i++) {
2798
		struct btrfs_device *device = map->stripes[i].dev;
M
Miao Xie 已提交
2799 2800 2801
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
2802
		if (ret) {
2803
			mutex_unlock(&fs_devices->device_list_mutex);
2804 2805 2806
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
2807

M
Miao Xie 已提交
2808 2809 2810 2811 2812 2813 2814 2815 2816 2817
		if (device->bytes_used > 0) {
			lock_chunks(root);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			spin_lock(&root->fs_info->free_chunk_lock);
			root->fs_info->free_chunk_space += dev_extent_len;
			spin_unlock(&root->fs_info->free_chunk_lock);
			btrfs_clear_space_info_full(root->fs_info);
			unlock_chunks(root);
		}
2818

2819 2820
		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
2821
			if (ret) {
2822
				mutex_unlock(&fs_devices->device_list_mutex);
2823 2824 2825
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}
2826
		}
2827
	}
2828 2829
	mutex_unlock(&fs_devices->device_list_mutex);

2830
	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2831 2832 2833 2834
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}
2835

2836 2837
	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

2838 2839
	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2840 2841 2842 2843
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
2844 2845
	}

2846
	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2847 2848 2849 2850
	if (ret) {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}
Y
Yan Zheng 已提交
2851

2852
out:
Y
Yan Zheng 已提交
2853 2854
	/* once for us */
	free_extent_map(em);
2855 2856
	return ret;
}

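/*
 * Relocate all extents in a chunk and then remove the now empty chunk.
 * The caller must hold the delete_unused_bgs_mutex.
 */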
static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2859 2860 2861 2862
{
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	int ret;
Y
Yan Zheng 已提交
2863

2864 2865 2866
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;

2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880
	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));

2881 2882 2883 2884 2885
	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
2886
	btrfs_scrub_pause(root);
2887
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2888
	btrfs_scrub_continue(root);
2889 2890 2891
	if (ret)
		return ret;

2892 2893
	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
2894 2895
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
2896
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
2897 2898 2899 2900 2901 2902 2903 2904
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, root, chunk_offset);
Y
Yan Zheng 已提交
2905
	btrfs_end_transaction(trans, root);
2906
	return ret;
Y
Yan Zheng 已提交
2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
2918 2919
	bool retried = false;
	int failed = 0;
Y
Yan Zheng 已提交
2920 2921 2922 2923 2924 2925
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

2926
again:
Y
Yan Zheng 已提交
2927 2928 2929 2930 2931
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
2932
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
Y
Yan Zheng 已提交
2933
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2934 2935
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
Y
Yan Zheng 已提交
2936
			goto error;
2937
		}
2938
		BUG_ON(ret == 0); /* Corruption */
Y
Yan Zheng 已提交
2939 2940 2941

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
2942 2943
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
Y
Yan Zheng 已提交
2944 2945 2946 2947
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;
Z
Zheng Yan 已提交
2948

Y
Yan Zheng 已提交
2949 2950
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
Z
Zheng Yan 已提交
2951

Y
Yan Zheng 已提交
2952 2953 2954
		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
2955
		btrfs_release_path(path);
2956

Y
Yan Zheng 已提交
2957
		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2958
			ret = btrfs_relocate_chunk(chunk_root,
Y
Yan Zheng 已提交
2959
						   found_key.offset);
2960 2961
			if (ret == -ENOSPC)
				failed++;
H
HIMANGI SARAOGI 已提交
2962 2963
			else
				BUG_ON(ret);
Y
Yan Zheng 已提交
2964
		}
2965
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2966

Y
Yan Zheng 已提交
2967 2968 2969 2970 2971
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
2972 2973 2974 2975
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
2976
	} else if (WARN_ON(failed && retried)) {
2977 2978
		ret = -ENOSPC;
	}
Y
Yan Zheng 已提交
2979 2980 2981
error:
	btrfs_free_path(path);
	return ret;
2982 2983
}

2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
3006
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
3055
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

I
Ilya Dryomov 已提交
3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3099
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
I
Ilya Dryomov 已提交
3100 3101 3102 3103 3104
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3105
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
I
Ilya Dryomov 已提交
3106 3107 3108 3109 3110
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3111
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
I
Ilya Dryomov 已提交
3112 3113 3114 3115 3116 3117
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

I
Ilya Dryomov 已提交
3147 3148 3149 3150
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
3151
static int chunk_profiles_filter(u64 chunk_type,
I
Ilya Dryomov 已提交
3152 3153
				 struct btrfs_balance_args *bargs)
{
3154 3155
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;
I
Ilya Dryomov 已提交
3156

3157
	if (bargs->profiles & chunk_type)
I
Ilya Dryomov 已提交
3158 3159 3160 3161 3162
		return 0;

	return 1;
}

3163
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
I
Ilya Dryomov 已提交
3164
			      struct btrfs_balance_args *bargs)
3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
					bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
					bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

3196
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3197
		u64 chunk_offset, struct btrfs_balance_args *bargs)
I
Ilya Dryomov 已提交
3198 3199 3200 3201 3202 3203 3204 3205
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

3206
	if (bargs->usage_min == 0)
3207
		user_thresh = 1;
3208 3209 3210 3211 3212 3213
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

I
Ilya Dryomov 已提交
3214 3215 3216 3217 3218 3219 3220
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

I
Ilya Dryomov 已提交
3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

I
Ilya Dryomov 已提交
3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
D
David Woodhouse 已提交
3255 3256 3257 3258 3259 3260 3261 3262 3263
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}
I
Ilya Dryomov 已提交
3264 3265 3266 3267 3268 3269 3270 3271

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
3272
		stripe_length = div_u64(stripe_length, factor);
I
Ilya Dryomov 已提交
3273 3274 3275 3276 3277 3278 3279 3280 3281

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

3309
static int chunk_soft_convert_filter(u64 chunk_type,
3310 3311 3312 3313 3314
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

3315 3316
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;
3317

3318
	if (bargs->target == chunk_type)
3319 3320 3321 3322 3323
		return 1;

	return 0;
}

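/*
 * Run a chunk through all configured balance filters. Returns 1 if the chunk
 * should be relocated, 0 if it is filtered out.
 */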
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

I
Ilya Dryomov 已提交
3345 3346 3347 3348
	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
I
Ilya Dryomov 已提交
3349 3350 3351 3352 3353 3354
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
3355 3356 3357
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
I
Ilya Dryomov 已提交
3358 3359 3360 3361 3362 3363
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
I
Ilya Dryomov 已提交
3364 3365 3366 3367 3368 3369
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
3370 3371 3372 3373 3374 3375
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
I
Ilya Dryomov 已提交
3376 3377
	}

3378 3379 3380 3381 3382 3383
	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

3384 3385 3386 3387 3388 3389
	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

3390 3391 3392 3393 3394 3395 3396 3397
	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
3398 3399 3400
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
3408 3409
	}

3410 3411 3412
	return 1;
}

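/*
 * Core loop of balance: after making some room on each device, do a counting
 * pass to gather per-type chunk counts for the limit filters, then relocate
 * every chunk that passes the filters.
 */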
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3414
{
3415
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3416 3417 3418
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
3419 3420 3421
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
3422
	u64 chunk_type;
3423
	struct btrfs_chunk *chunk;
3424 3425 3426
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
3427
	struct btrfs_trans_handle *trans;
3428 3429
	struct extent_buffer *leaf;
	int slot;
3430 3431
	int ret;
	int enospc_errors = 0;
3432
	bool counting = true;
3433
	/*
	 * The single value limit and the min/max limits use the same bytes in
	 * the balance args, so stash the configured limits here and restore
	 * them for the relocation pass.
	 */
3434 3435 3436
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
3437 3438 3439
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
3440
	int chunk_reserved = 0;
3441
	u64 bytes_used = 0;
3442 3443

	/* step one make some room on all the devices */
3444
	devices = &fs_info->fs_devices->devices;
Q
Qinghuang Feng 已提交
3445
	list_for_each_entry(device, devices, dev_list) {
3446
		old_size = btrfs_device_get_total_bytes(device);
3447
		size_to_free = div_factor(old_size, 1);
3448
		size_to_free = min_t(u64, size_to_free, SZ_1M);
Y
Yan Zheng 已提交
3449
		if (!device->writeable ||
3450 3451
		    btrfs_device_get_total_bytes(device) -
		    btrfs_device_get_bytes_used(device) > size_to_free ||
3452
		    device->is_tgtdev_for_dev_replace)
3453 3454 3455
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
3456 3457
		if (ret == -ENOSPC)
			break;
3458 3459
		BUG_ON(ret);

3460
		trans = btrfs_start_transaction(dev_root, 0);
3461
		BUG_ON(IS_ERR(trans));
3462 3463 3464 3465 3466 3467 3468 3469 3470

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
3471 3472 3473 3474
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
3475 3476 3477 3478 3479 3480

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
3481
	if (!counting) {
3482 3483 3484 3485
		/*
		 * The single value limit and min/max limits use the same
		 * bytes in the balance args, so restore the saved limits for
		 * the relocation pass.
		 */
3486 3487 3488 3489
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
3490 3491 3492 3493
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

C
Chris Mason 已提交
3494
	while (1) {
3495
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3496
		    atomic_read(&fs_info->balance_cancel_req)) {
3497 3498 3499 3500
			ret = -ECANCELED;
			goto error;
		}

3501
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3502
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3503 3504
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3505
			goto error;
3506
		}
3507 3508 3509 3510 3511 3512

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
3513
			BUG(); /* FIXME break ? */
3514 3515 3516

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
3517
		if (ret) {
3518
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3519
			ret = 0;
3520
			break;
3521
		}
3522

3523 3524 3525
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3526

3527 3528
		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3529
			break;
3530
		}
3531

3532
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3533
		chunk_type = btrfs_chunk_type(leaf, chunk);
3534

3535 3536 3537 3538 3539 3540
		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

3541 3542
		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
3543

3544
		btrfs_release_path(path);
3545 3546
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3547
			goto loop;
3548
		}
3549

3550
		if (counting) {
3551
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3552 3553 3554
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		ASSERT(fs_info->data_sinfo);
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
		    !chunk_reserved && !bytes_used) {
			trans = btrfs_start_transaction(chunk_root, 0);
			if (IS_ERR(trans)) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				ret = PTR_ERR(trans);
				goto error;
			}

			ret = btrfs_force_chunk_alloc(trans, chunk_root,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans, chunk_root);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			}
			chunk_reserved = 1;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

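/*
 * Decide whether the balance item and the in-memory balance control should
 * be torn down: either a cancel was requested, or the balance ran to the end
 * with neither a pause nor a cancel pending.  A pending pause is the one
 * case that keeps the state around so the balance can be resumed later.
 */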
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	int ret;

	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);

	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
}

/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}

/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info, "with mixed groups data and "
				   "metadata balance options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		BUG_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
	if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if (validate_convert_profile(&bctl->data, allowed)) {
		btrfs_err(fs_info, "unable to start balance with target "
			   "data profile %llu",
		       bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		btrfs_err(fs_info,
			   "unable to start balance with target metadata profile %llu",
		       bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		btrfs_err(fs_info,
			   "unable to start balance with target system profile %llu",
		       bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed))) {
			if (bctl->flags & BTRFS_BALANCE_FORCE) {
				btrfs_info(fs_info, "force reducing metadata integrity");
			} else {
				btrfs_err(fs_info, "balance will reduce metadata "
					   "integrity, use force if you want this");
				ret = -EINVAL;
				goto out;
			}
		}
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
		btrfs_warn(fs_info,
	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
			bctl->meta.target, bctl->data.target);
	}

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures = min(
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
			btrfs_get_num_tolerated_disk_barrier_failures(
				bctl->sys.target));
	}

	ret = insert_balance_item(fs_info->tree_root, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		set_balance_control(bctl);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	atomic_inc(&fs_info->balance_running);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	atomic_dec(&fs_info->balance_running);

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures =
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	}

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, 0, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		__cancel_balance(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
	else {
		kfree(bctl);
		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
	}
	return ret;
}

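/*
 * Kthread that resumes an interrupted balance found at mount time.  It takes
 * the volume and balance mutexes in the same order as the ioctl path and
 * re-enters btrfs_balance() with the recovered balance control.
 */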
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	if (fs_info->balance_ctl) {
		btrfs_info(fs_info, "continuing balance");
		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);

	return ret;
}

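/*
 * If a balance control was recovered from disk, restart it in a background
 * kthread.  The skip_balance mount option turns this into a no-op so the
 * user can resume or cancel the balance by hand later.
 */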
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	spin_lock(&fs_info->balance_lock);
	if (!fs_info->balance_ctl) {
		spin_unlock(&fs_info->balance_lock);
		return 0;
	}
	spin_unlock(&fs_info->balance_lock);

	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
		btrfs_info(fs_info, "force skipping balance");
		return 0;
	}

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

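/*
 * Read the balance item left behind by an interrupted balance and rebuild
 * the in-memory balance control with BTRFS_BALANCE_RESUME set.  The balance
 * itself is not restarted here; btrfs_resume_balance_async() does that.
 */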
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->fs_info = fs_info;
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	set_balance_control(bctl);

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

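/*
 * Pause a running balance: bump balance_pause_req and wait until the
 * relocation loop notices it and drops balance_running.  Returns -ENOTCONN
 * if no balance is currently running.
 */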
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (atomic_read(&fs_info->balance_running)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(atomic_read(&fs_info->balance_running));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	if (fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (atomic_read(&fs_info->balance_running)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);
		mutex_lock(&fs_info->balance_mutex);
	} else {
		/* __cancel_balance needs volume_mutex */
		mutex_unlock(&fs_info->balance_mutex);
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl)
			__cancel_balance(fs_info);

		mutex_unlock(&fs_info->volume_mutex);
	}

	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

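/*
 * Walk every root item in the tree of tree roots and make sure the uuid
 * tree has an entry for each subvolume uuid and received_uuid found.  Each
 * insertion runs in its own small transaction (two uuid items) so the scan
 * stays cheap to interrupt and resume.
 */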
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_key max_key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	max_key.objectid = (u64)-1;
	max_key.type = BTRFS_ROOT_ITEM_KEY;
	max_key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, 0);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fs_info->uuid_root);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		fs_info->update_uuid_tree_gen = 1;
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}

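/*
 * Create the uuid tree (one transaction for the new root node and root item)
 * and kick off btrfs_uuid_scan_kthread() to populate it in the background.
 */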
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, tree_root, ret);
		btrfs_end_transaction(trans, tree_root);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans, tree_root);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff = old_size - new_size;

	if (device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;

	lock_chunks(root);

	btrfs_device_set_total_bytes(device, new_size);
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_offset);
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not complete yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we can not use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			unlock_chunks(root);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &root->fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	if (ret) {
		lock_chunks(root);
		btrfs_device_set_total_bytes(device, old_size);
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
	}
	return ret;
}

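/*
 * System chunks are duplicated into the superblock's sys_chunk_array so the
 * chunk tree itself can be located at mount time.  Append the (disk key,
 * chunk item) pair to that array; -EFBIG means the fixed-size array is full.
 */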
static int btrfs_add_system_chunk(struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		unlock_chunks(root);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	unlock_chunks(root);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO allow them to set a preferred stripe size */
	return SZ_64K;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

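/*
 * These macros bound the number of stripes in one chunk item.  BTRFS_MAX_DEVS
 * keeps a chunk item plus its item header within one leaf of the chunk tree;
 * BTRFS_MAX_DEVS_SYS_CHUNK keeps two chunk items and two disk keys within
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE so system chunks always fit in the superblock
 * array as well.  (As a rough, illustrative figure only: with the usual
 * on-disk sizes this works out to a few hundred stripes for a regular chunk
 * and on the order of 58 for a system chunk; the exact values depend on the
 * configured leaf size.)
 */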
#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

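/*
 * First phase of chunk allocation: pick devices and lay out the stripes.
 * Gather the largest free extent on every writeable device, sort the devices
 * by that hole size, round the device count down to a multiple of
 * devs_increment, then size the stripes so the chunk stays within
 * max_chunk_size (and within 10% of the writeable space).  The mapping is
 * inserted into the in-memory chunk map and the block group is created; the
 * chunk item itself is written later by btrfs_finish_chunk_alloc().
 */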
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, u64 start,
			       u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies to data has */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	index = __get_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = 10 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
		       type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!device->in_fs_metadata ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;
4647

4648 4649
		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;
4650

4651 4652
		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;
4653

4654 4655 4656 4657 4658
		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = num_stripes / ncopies;

	if (type & BTRFS_BLOCK_GROUP_RAID5) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 1;
	}
	if (type & BTRFS_BLOCK_GROUP_RAID6) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 2;
	}

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		u64 mask = (1ULL << 24) - 1;

		stripe_size = div_u64(max_chunk_size, data_stripes);

		/* bump the answer up to a 16MB boundary */
		stripe_size = (stripe_size + mask) & ~mask;

		/* but don't go higher than the limits we found
		 * while searching for free extents
		 */
		if (stripe_size > devices_info[ndevs-1].max_avail)
			stripe_size = devices_info[ndevs-1].max_avail;
	}

	stripe_size = div_u64(stripe_size, dev_stripes);

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = div_u64(stripe_size, raid_stripe_len);
	stripe_size *= raid_stripe_len;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = raid_stripe_len;
	map->io_align = raid_stripe_len;
	map->io_width = raid_stripe_len;
	map->type = type;
	map->sub_stripes = sub_stripes;

	num_bytes = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (!ret) {
		list_add_tail(&em->list, &trans->transaction->pending_chunks);
		atomic_inc(&em->refs);
	}
	write_unlock(&em_tree->lock);
	if (ret) {
		free_extent_map(em);
		goto error;
	}

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	free_extent_map(em);
	check_raid56_incompat_flag(extent_root->fs_info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
	/* One for the pending_chunks list reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

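/*
 * Second phase of chunk allocation: look up the mapping created by
 * __btrfs_alloc_chunk(), update the device items, insert the device extents
 * and the chunk item, and mirror system chunks into the superblock array.
 */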
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(extent_root->fs_info, "unable to find logical "
			   "%Lu len %Lu", chunk_offset, chunk_size);
		return -EINVAL;
	}

	if (em->start != chunk_offset || em->len != chunk_size) {
		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
			  chunk_size, em->start, em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device,
					     chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     chunk_offset, dev_offset,
					     stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does works
 * that make the new allocated chunk useable, but not do any operation
 * that modifies the chunk tree. The second part does the works that
 * require modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;

	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
	chunk_offset = find_next_chunk(extent_root->fs_info);
	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
				  alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(root->fs_info);
	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
				  alloc_profile);
	return ret;
}

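/*
 * Maximum number of missing devices a write can tolerate for this chunk
 * type: 1 for RAID1/RAID10/RAID5/DUP, 2 for RAID6, 0 otherwise.
 */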
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
	}

	return max_errors;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->missing) {
			miss_ndevs++;
			continue;
		}

		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

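/*
 * Number of copies of the data at @logical: num_stripes for RAID1/DUP,
 * sub_stripes for RAID10, 2 for RAID5, 3 for RAID6 and 1 otherwise, plus
 * one extra while a device replace is running.
 */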
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);

	/*
	 * We could return errors for these cases, but that could get ugly and
	 * we'd probably do the same thing which is just not do anything else
	 * and exit, so return 1 so the callers don't try to use other copies.
	 */
	if (!em) {
		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
			    logical+len);
		return 1;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
			    "%Lu-%Lu", logical, logical+len, em->start,
			    em->start + em->len);
		free_extent_map(em);
		return 1;
	}

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	free_extent_map(em);

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
		ret++;
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
				    struct btrfs_mapping_tree *map_tree,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	unsigned long len = root->sectorsize;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		len = map->stripe_len * nr_data_stripes(map);
	free_extent_map(em);
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
			   u64 logical, u64 len, int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		ret = 1;
	free_extent_map(em);
	return ret;
}

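/*
 * Pick a stripe to read from: prefer @optimal, fall back to any stripe whose
 * device is present, and only use the dev-replace source device (when the
 * replace policy asks to avoid it) if nothing else is available.
 */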
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first, int num,
			    int optimal, int dev_replace_is_ongoing)
{
	int i;
	int tolerance;
	struct btrfs_device *srcdev;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[optimal].dev->bdev &&
		    (tolerance || map->stripes[optimal].dev != srcdev))
			return optimal;
		for (i = first; i < first + num; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}

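/*
 * Allocate a btrfs_bio with room for the stripe array, the target-device
 * index array and the raid_map appended after the struct itself.  The
 * allocation uses __GFP_NOFAIL, so callers do not need an error path.
 */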
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	atomic_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!atomic_read(&bbio->refs));
	atomic_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (atomic_dec_and_test(&bbio->refs))
		kfree(bbio);
}

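/*
 * Map a logical range onto physical stripes.  The basic arithmetic is
 *
 *	stripe_nr     = offset / stripe_len	(which stripe we are in)
 *	stripe_offset = offset - stripe_nr * stripe_len
 *
 * so, for example, with the default 64K stripe_len an offset of 200K lands
 * in stripe 3 at stripe_offset 8K.  RAID5/6 additionally need the start of
 * the full stripe (raid56_full_stripe_start) so writes do not straddle
 * parity stripes.
 */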
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	u64 stripe_len;
	u32 stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
			logical, *length);
		return -EINVAL;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
			   "found %Lu-%Lu", logical, em->start,
			   em->start + em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	offset = logical - em->start;

	stripe_len = map->stripe_len;
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(stripe_nr, stripe_len);

	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info, "stripe math has gone wrong, "
			   "stripe_offset=%llu, offset=%llu, start=%llu, "
			   "logical=%llu, stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		free_extent_map(em);
		return -EINVAL;
	}
5332 5333 5334 5335

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

D
David Woodhouse 已提交
5336
	/* if we're here for raid56, we need to know the stripe aligned start */
5337
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
D
David Woodhouse 已提交
5338 5339 5340 5341 5342 5343
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;

		/* allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
5344 5345
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
				full_stripe_len);
D
David Woodhouse 已提交
5346 5347 5348 5349 5350
		raid56_full_stripe_start *= full_stripe_len;
	}

	if (rw & REQ_DISCARD) {
		/* we don't discard raid56 yet */
5351
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
D
David Woodhouse 已提交
5352 5353 5354
			ret = -EOPNOTSUPP;
			goto out;
		}
5355
		*length = min_t(u64, em->len - offset, *length);
D
David Woodhouse 已提交
5356 5357 5358 5359 5360
	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/* For writes to RAID[56], allow a full stripeset across all disks.
		   For other RAID types and for RAID[56] reads, just allow a single
		   stripe (on a single disk). */
5361
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
D
David Woodhouse 已提交
5362 5363 5364 5365 5366 5367 5368 5369
		    (rw & REQ_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
5370 5371 5372
	} else {
		*length = em->len - offset;
	}
5373

D
David Woodhouse 已提交
5374 5375
	/* This is for when we're called from btrfs_merge_bio_hook() and all
	   it cares about is the length */
5376
	if (!bbio_ret)
5377 5378
		goto out;

5379
	btrfs_dev_replace_lock(dev_replace, 0);
5380 5381
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (!dev_replace_is_ongoing)
5382 5383 5384
		btrfs_dev_replace_unlock(dev_replace, 0);
	else
		btrfs_dev_replace_set_lock_blocking(dev_replace);
5385

5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409
	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
	    dev_replace->tgtdev != NULL) {
		/*
		 * in dev-replace case, for repair case (that's the only
		 * case where the mirror is selected explicitly when
		 * calling btrfs_map_block), blocks left of the left cursor
		 * can also be read from the target drive.
		 * For REQ_GET_READ_MIRRORS, the target drive is added as
		 * the last one to the array of stripes. For READ, it also
		 * needs to be supported using the same mirror number.
		 * If the requested block is not left of the left cursor,
		 * EIO is returned. This can happen because btrfs_num_copies()
		 * returns one more in the dev-replace case.
		 */
		u64 tmp_length = *length;
		struct btrfs_bio *tmp_bbio = NULL;
		int tmp_num_stripes;
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5410
			     logical, &tmp_length, &tmp_bbio, 0, 0);
5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423
		if (ret) {
			WARN_ON(tmp_bbio != NULL);
			goto out;
		}

		tmp_num_stripes = tmp_bbio->num_stripes;
		if (mirror_num > tmp_num_stripes) {
			/*
			 * REQ_GET_READ_MIRRORS does not contain this
			 * mirror, that means that the requested area
			 * is not left of the left cursor
			 */
			ret = -EIO;
5424
			btrfs_put_bbio(tmp_bbio);
5425 5426 5427 5428 5429 5430 5431 5432 5433 5434
			goto out;
		}

		/*
		 * process the rest of the function using the mirror_num
		 * of the source drive. Therefore look it up first.
		 * At the end, patch the device pointer to the one of the
		 * target drive.
		 */
		for (i = 0; i < tmp_num_stripes; i++) {
5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448
			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
				continue;

			/*
			 * In case of DUP, in order to keep it simple, only add
			 * the mirror with the lowest physical address
			 */
			if (found &&
			    physical_of_found <= tmp_bbio->stripes[i].physical)
				continue;

			index_srcdev = i;
			found = 1;
			physical_of_found = tmp_bbio->stripes[i].physical;
5449 5450
		}

5451 5452 5453
		btrfs_put_bbio(tmp_bbio);

		if (!found) {
5454 5455 5456 5457 5458
			WARN_ON(1);
			ret = -EIO;
			goto out;
		}

5459 5460 5461
		mirror_num = index_srcdev + 1;
		patch_the_first_stripe_for_dev_replace = 1;
		physical_to_patch_in_first_stripe = physical_of_found;
5462 5463 5464 5465
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

5466
	num_stripes = 1;
5467
	stripe_index = 0;
5468
	stripe_nr_orig = stripe_nr;
5469
	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5470
	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5471 5472
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);
D
David Woodhouse 已提交
5473

5474 5475 5476 5477
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
5478 5479
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
5480 5481
		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
			mirror_num = 1;
5482
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5483
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5484
			num_stripes = map->num_stripes;
5485
		else if (mirror_num)
5486
			stripe_index = mirror_num - 1;
5487
		else {
5488
			stripe_index = find_live_mirror(fs_info, map, 0,
5489
					    map->num_stripes,
5490 5491
					    current->pid % map->num_stripes,
					    dev_replace_is_ongoing);
5492
			mirror_num = stripe_index + 1;
5493
		}
5494

5495
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5496
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5497
			num_stripes = map->num_stripes;
5498
		} else if (mirror_num) {
5499
			stripe_index = mirror_num - 1;
5500 5501 5502
		} else {
			mirror_num = 1;
		}
5503

C
Chris Mason 已提交
5504
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5505
		u32 factor = map->num_stripes / map->sub_stripes;
C
Chris Mason 已提交
5506

5507
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
C
Chris Mason 已提交
5508 5509
		stripe_index *= map->sub_stripes;

5510
		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5511
			num_stripes = map->sub_stripes;
5512 5513 5514 5515
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
C
Chris Mason 已提交
5516 5517
		else if (mirror_num)
			stripe_index += mirror_num - 1;
5518
		else {
J
Jan Schmidt 已提交
5519
			int old_stripe_index = stripe_index;
5520 5521
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
5522
					      map->sub_stripes, stripe_index +
5523 5524
					      current->pid % map->sub_stripes,
					      dev_replace_is_ongoing);
J
Jan Schmidt 已提交
5525
			mirror_num = stripe_index - old_stripe_index + 1;
5526
		}
D
David Woodhouse 已提交
5527

5528
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5529
		if (need_raid_map &&
5530 5531
		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
		     mirror_num > 1)) {
D
David Woodhouse 已提交
5532
			/* push stripe_nr back to the start of the full stripe */
5533 5534
			stripe_nr = div_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));
D
David Woodhouse 已提交
5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
5549 5550
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
D
David Woodhouse 已提交
5551 5552 5553 5554 5555
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
5556 5557
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
5558 5559 5560
			if (!(rw & (REQ_WRITE | REQ_DISCARD |
				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
				mirror_num = 1;
D
David Woodhouse 已提交
5561
		}
5562 5563
	} else {
		/*
5564 5565 5566
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
5567
		 */
5568 5569
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
5570
		mirror_num = stripe_index + 1;
5571
	}
5572 5573 5574 5575 5576 5577 5578
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info, "stripe index math went horribly wrong, "
			   "got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}
5579

5580
	num_alloc_stripes = num_stripes;
5581 5582 5583 5584 5585
	if (dev_replace_is_ongoing) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_alloc_stripes <<= 1;
		if (rw & REQ_GET_READ_MIRRORS)
			num_alloc_stripes++;
5586
		tgtdev_indexes = num_stripes;
5587
	}
5588

5589
	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
L
Li Zefan 已提交
5590 5591 5592 5593
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
5594 5595
	if (dev_replace_is_ongoing)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
L
Li Zefan 已提交
5596

5597
	/* build raid_map */
5598
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5599 5600 5601
	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
	    mirror_num > 1)) {
		u64 tmp;
5602
		unsigned rot;
5603 5604 5605 5606 5607 5608 5609

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
5610
		div_u64_rem(stripe_nr, num_stripes, &rot);
5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

5624
	if (rw & REQ_DISCARD) {
5625 5626
		u32 factor = 0;
		u32 sub_stripes = 0;
5627 5628
		u64 stripes_per_dev = 0;
		u32 remaining_stripes = 0;
L
Liu Bo 已提交
5629
		u32 last_stripe = 0;
5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642

		if (map->type &
		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
				sub_stripes = 1;
			else
				sub_stripes = map->sub_stripes;

			factor = map->num_stripes / sub_stripes;
			stripes_per_dev = div_u64_rem(stripe_nr_end -
						      stripe_nr_orig,
						      factor,
						      &remaining_stripes);
L
Liu Bo 已提交
5643 5644
			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
			last_stripe *= sub_stripes;
5645 5646
		}

5647
		for (i = 0; i < num_stripes; i++) {
5648
			bbio->stripes[i].physical =
5649 5650
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
5651
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5652

5653 5654 5655 5656
			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;
L
Liu Bo 已提交
5657

5658 5659 5660
				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;
L
Liu Bo 已提交
5661 5662 5663 5664 5665 5666 5667 5668 5669

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
5670
				if (i < sub_stripes)
5671
					bbio->stripes[i].length -=
5672
						stripe_offset;
L
Liu Bo 已提交
5673 5674 5675 5676

				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
5677
					bbio->stripes[i].length -=
5678
						stripe_end_offset;
L
Liu Bo 已提交
5679

5680 5681
				if (i == sub_stripes - 1)
					stripe_offset = 0;
5682
			} else
5683
				bbio->stripes[i].length = *length;
5684 5685 5686 5687 5688 5689 5690 5691 5692 5693

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
5694
			bbio->stripes[i].physical =
5695 5696 5697
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
5698
			bbio->stripes[i].dev =
5699
				map->stripes[stripe_index].dev;
5700
			stripe_index++;
5701
		}
5702
	}
L
Li Zefan 已提交
5703

5704 5705
	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
		max_errors = btrfs_chunk_max_errors(map);
L
Li Zefan 已提交
5706

5707 5708
	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);
5709

5710
	tgtdev_indexes = 0;
5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738
	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
5739
				bbio->tgtdev_map[i] = index_where_to_add;
5740 5741
				index_where_to_add++;
				max_errors++;
5742
				tgtdev_indexes++;
5743 5744 5745
			}
		}
		num_stripes = index_where_to_add;
5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
5777 5778
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;
5779

5780 5781 5782 5783 5784
			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;
5785

5786 5787
			tgtdev_indexes++;
			num_stripes++;
5788
		}
5789 5790
	}

L
Li Zefan 已提交
5791
	*bbio_ret = bbio;
Z
Zhao Lei 已提交
5792
	bbio->map_type = map->type;
L
Li Zefan 已提交
5793 5794 5795
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;
5796
	bbio->num_tgtdevs = tgtdev_indexes;
5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
5809
out:
5810 5811 5812 5813
	if (dev_replace_is_ongoing) {
		btrfs_dev_replace_clear_lock_blocking(dev_replace);
		btrfs_dev_replace_unlock(dev_replace, 0);
	}
5814
	free_extent_map(em);
L
Li Zefan 已提交
5815
	return ret;
5816 5817
}

5818
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5819
		      u64 logical, u64 *length,
5820
		      struct btrfs_bio **bbio_ret, int mirror_num)
5821
{
5822
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5823
				 mirror_num, 0);
5824 5825
}

5826 5827 5828 5829
/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret, int mirror_num,
5830
		     int need_raid_map)
5831 5832
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5833
				 mirror_num, need_raid_map);
5834 5835
}

Y
Yan Zheng 已提交
5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
D
David Woodhouse 已提交
5847
	u64 rmap_len;
Y
Yan Zheng 已提交
5848 5849
	int i, j, nr = 0;

5850
	read_lock(&em_tree->lock);
Y
Yan Zheng 已提交
5851
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5852
	read_unlock(&em_tree->lock);
Y
Yan Zheng 已提交
5853

5854
	if (!em) {
5855
		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5856 5857 5858 5859 5860
		       chunk_start);
		return -EIO;
	}

	if (em->start != chunk_start) {
5861
		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5862 5863 5864 5865
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
5866
	map = em->map_lookup;
Y
Yan Zheng 已提交
5867 5868

	length = em->len;
D
David Woodhouse 已提交
5869 5870
	rmap_len = map->stripe_len;

Y
Yan Zheng 已提交
5871
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5872
		length = div_u64(length, map->num_stripes / map->sub_stripes);
Y
Yan Zheng 已提交
5873
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5874
		length = div_u64(length, map->num_stripes);
5875
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5876
		length = div_u64(length, nr_data_stripes(map));
D
David Woodhouse 已提交
5877 5878
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}
Y
Yan Zheng 已提交
5879

5880
	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5881
	BUG_ON(!buf); /* -ENOMEM */
Y
Yan Zheng 已提交
5882 5883 5884 5885 5886 5887 5888 5889 5890

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
5891
		stripe_nr = div_u64(stripe_nr, map->stripe_len);
Y
Yan Zheng 已提交
5892 5893 5894

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
5895
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
Y
Yan Zheng 已提交
5896 5897
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
D
David Woodhouse 已提交
5898 5899 5900 5901 5902
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
5903
		WARN_ON(nr >= map->num_stripes);
Y
Yan Zheng 已提交
5904 5905 5906 5907
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
5908 5909
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
Y
Yan Zheng 已提交
5910
			buf[nr++] = bytenr;
5911
		}
Y
Yan Zheng 已提交
5912 5913 5914 5915
	}

	*logical = buf;
	*naddrs = nr;
D
David Woodhouse 已提交
5916
	*stripe_len = rmap_len;
Y
Yan Zheng 已提交
5917 5918 5919

	free_extent_map(em);
	return 0;
5920 5921
}

5922
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5923
{
5924 5925
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
5926
	bio_endio(bio);
5927

5928
	btrfs_put_bbio(bbio);
5929 5930
}

5931
static void btrfs_end_bio(struct bio *bio)
5932
{
5933
	struct btrfs_bio *bbio = bio->bi_private;
5934
	int is_orig_bio = 0;
5935

5936
	if (bio->bi_error) {
5937
		atomic_inc(&bbio->error);
5938
		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5939
			unsigned int stripe_index =
5940
				btrfs_io_bio(bio)->stripe_index;
5941
			struct btrfs_device *dev;
5942 5943 5944

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
5957 5958
		}
	}
5959

5960
	if (bio == bbio->orig_bio)
5961 5962
		is_orig_bio = 1;

5963 5964
	btrfs_bio_counter_dec(bbio->fs_info);

5965
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5966 5967
		if (!is_orig_bio) {
			bio_put(bio);
5968
			bio = bbio->orig_bio;
5969
		}
5970

5971
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5972
		/* only send an error to the higher layers if it is
D
David Woodhouse 已提交
5973
		 * beyond the tolerance of the btrfs bio
5974
		 */
5975
		if (atomic_read(&bbio->error) > bbio->max_errors) {
5976
			bio->bi_error = -EIO;
5977
		} else {
5978 5979 5980 5981
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
5982
			bio->bi_error = 0;
5983
		}
5984

5985
		btrfs_end_bbio(bbio, bio);
5986
	} else if (!is_orig_bio) {
5987 5988 5989 5990
		bio_put(bio);
	}
}

5991 5992 5993 5994 5995 5996 5997
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
5998 5999 6000
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
6001 6002
{
	int should_queue = 1;
6003
	struct btrfs_pending_bios *pending_bios;
6004

D
David Woodhouse 已提交
6005
	if (device->missing || !device->bdev) {
6006
		bio_io_error(bio);
D
David Woodhouse 已提交
6007 6008 6009
		return;
	}

6010
	/* don't bother with additional async steps for reads, right now */
6011
	if (!(rw & REQ_WRITE)) {
6012
		bio_get(bio);
6013
		btrfsic_submit_bio(rw, bio);
6014
		bio_put(bio);
6015
		return;
6016 6017 6018
	}

	/*
6019
	 * nr_async_bios allows us to reliably return congestion to the
6020 6021 6022 6023
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
6024
	atomic_inc(&root->fs_info->nr_async_bios);
6025
	WARN_ON(bio->bi_next);
6026 6027 6028 6029
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
6030
	if (bio->bi_rw & REQ_SYNC)
6031 6032 6033
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;
6034

6035 6036
	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;
6037

6038 6039 6040
	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
6041 6042 6043 6044 6045 6046
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
6047 6048
		btrfs_queue_work(root->fs_info->submit_workers,
				 &device->work);
6049 6050
}

6051 6052 6053 6054 6055 6056 6057
static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
6058
	btrfs_io_bio(bio)->stripe_index = dev_nr;
6059
	bio->bi_end_io = btrfs_end_bio;
6060
	bio->bi_iter.bi_sector = physical >> 9;
6061 6062 6063 6064 6065 6066
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
M
Masanari Iida 已提交
6067
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
6068
			 "(%s id %llu), size=%u\n", rw,
6069 6070
			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_iter.bi_size);
6071 6072 6073 6074
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;
6075 6076 6077

	btrfs_bio_counter_inc_noblocked(root->fs_info);

6078
	if (async)
D
David Woodhouse 已提交
6079
		btrfs_schedule_bio(root, dev, rw, bio);
6080 6081 6082 6083 6084 6085 6086 6087
	else
		btrfsic_submit_bio(rw, bio);
}

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6088
		/* Should be the original bio. */
6089 6090
		WARN_ON(bio != bbio->orig_bio);

6091
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6092
		bio->bi_iter.bi_sector = logical >> 9;
6093 6094
		bio->bi_error = -EIO;
		btrfs_end_bbio(bbio, bio);
6095 6096 6097
	}
}

6098
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
6099
		  int mirror_num, int async_submit)
6100 6101
{
	struct btrfs_device *dev;
6102
	struct bio *first_bio = bio;
6103
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6104 6105 6106
	u64 length = 0;
	u64 map_length;
	int ret;
6107 6108
	int dev_nr;
	int total_devs;
6109
	struct btrfs_bio *bbio = NULL;
6110

6111
	length = bio->bi_iter.bi_size;
6112
	map_length = length;
6113

6114
	btrfs_bio_counter_inc_blocked(root->fs_info);
D
David Woodhouse 已提交
6115
	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
6116
			      mirror_num, 1);
6117 6118
	if (ret) {
		btrfs_bio_counter_dec(root->fs_info);
6119
		return ret;
6120
	}
6121

6122
	total_devs = bbio->num_stripes;
D
David Woodhouse 已提交
6123 6124 6125
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
6126
	bbio->fs_info = root->fs_info;
D
David Woodhouse 已提交
6127 6128
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

6129 6130
	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((rw & WRITE) || (mirror_num > 1))) {
D
David Woodhouse 已提交
6131 6132 6133
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (rw & WRITE) {
6134
			ret = raid56_parity_write(root, bio, bbio, map_length);
D
David Woodhouse 已提交
6135
		} else {
6136
			ret = raid56_parity_recover(root, bio, bbio, map_length,
6137
						    mirror_num, 1);
D
David Woodhouse 已提交
6138
		}
6139

6140 6141
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
D
David Woodhouse 已提交
6142 6143
	}

6144
	if (map_length < length) {
6145
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6146
			logical, length, map_length);
6147 6148
		BUG();
	}
6149

6150
	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6151 6152 6153 6154 6155 6156
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

6157
		if (dev_nr < total_devs - 1) {
6158
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6159
			BUG_ON(!bio); /* -ENOMEM */
6160
		} else
6161
			bio = first_bio;
6162 6163 6164 6165

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
6166
	}
6167
	btrfs_bio_counter_dec(root->fs_info);
6168 6169 6170
	return 0;
}

6171
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
Y
Yan Zheng 已提交
6172
				       u8 *uuid, u8 *fsid)
6173
{
Y
Yan Zheng 已提交
6174 6175 6176
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

6177
	cur_devices = fs_info->fs_devices;
Y
Yan Zheng 已提交
6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
6189 6190
}

6191
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6192
					    struct btrfs_fs_devices *fs_devices,
6193 6194 6195 6196
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

6197 6198
	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
6199
		return NULL;
6200 6201

	list_add(&device->dev_list, &fs_devices->devices);
Y
Yan Zheng 已提交
6202
	device->fs_devices = fs_devices;
6203
	fs_devices->num_devices++;
6204 6205

	device->missing = 1;
6206
	fs_devices->missing_devices++;
6207

6208 6209 6210
	return device;
}

6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

6231
	if (WARN_ON(!devid && !fs_info))
6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

6256 6257
	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);
6258 6259 6260 6261

	return dev;
}

6262 6263 6264 6265
/* Return -EIO if any error, otherwise return 0. */
static int btrfs_check_chunk_valid(struct btrfs_root *root,
				   struct extent_buffer *leaf,
				   struct btrfs_chunk *chunk, u64 logical)
6266 6267
{
	u64 length;
6268
	u64 stripe_len;
6269 6270 6271
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
6272

6273
	length = btrfs_chunk_length(leaf, chunk);
6274 6275
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6276 6277 6278
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

6279 6280 6281 6282 6283 6284 6285 6286 6287 6288
	if (!num_stripes) {
		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, root->sectorsize)) {
		btrfs_err(root->fs_info,
			  "invalid chunk logical %llu", logical);
		return -EIO;
	}
6289 6290 6291 6292 6293
	if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
		btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
			  btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
6294 6295 6296 6297 6298
	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
		btrfs_err(root->fs_info,
			"invalid chunk length %llu", length);
		return -EIO;
	}
6299
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
6300 6301 6302 6303 6304
		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6305
	    type) {
6306 6307 6308 6309 6310 6311
		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}
6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		btrfs_err(root->fs_info,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 stripe_len;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
	if (ret)
		return ret;
6353

6354
	read_lock(&map_tree->map_tree.lock);
6355
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6356
	read_unlock(&map_tree->map_tree.lock);
6357 6358 6359 6360 6361 6362 6363 6364 6365

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

6366
	em = alloc_extent_map();
6367 6368
	if (!em)
		return -ENOMEM;
6369
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6370 6371 6372 6373 6374
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

6375
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6376
	em->map_lookup = map;
6377 6378
	em->start = logical;
	em->len = length;
6379
	em->orig_start = 0;
6380
	em->block_start = 0;
C
Chris Mason 已提交
6381
	em->block_len = em->len;
6382

6383 6384 6385 6386 6387 6388
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
C
Chris Mason 已提交
6389
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6390 6391 6392 6393
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6394 6395 6396
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
6397 6398
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
6399
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6400 6401 6402
			free_extent_map(em);
			return -EIO;
		}
6403 6404
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
6405 6406
				add_missing_dev(root, root->fs_info->fs_devices,
						devid, uuid);
6407 6408 6409 6410
			if (!map->stripes[i].dev) {
				free_extent_map(em);
				return -EIO;
			}
6411 6412
			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
						devid, uuid);
6413 6414
		}
		map->stripes[i].dev->in_fs_metadata = 1;
6415 6416
	}

6417
	write_lock(&map_tree->map_tree.lock);
J
Josef Bacik 已提交
6418
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6419
	write_unlock(&map_tree->map_tree.lock);
6420
	BUG_ON(ret); /* Tree corruption */
6421 6422 6423 6424 6425
	free_extent_map(em);

	return 0;
}

6426
static void fill_device_from_item(struct extent_buffer *leaf,
6427 6428 6429 6430 6431 6432
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
6433 6434
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
6435
	device->commit_total_bytes = device->disk_total_bytes;
6436
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6437
	device->commit_bytes_used = device->bytes_used;
6438 6439 6440 6441
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6442
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6443
	device->is_tgtdev_for_dev_replace = 0;
6444

6445
	ptr = btrfs_device_uuid(dev_item);
6446
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6447 6448
}

6449 6450
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
						  u8 *fsid)
Y
Yan Zheng 已提交
6451 6452 6453 6454
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

6455
	BUG_ON(!mutex_is_locked(&uuid_mutex));
Y
Yan Zheng 已提交
6456 6457 6458

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
6459 6460 6461
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
			return fs_devices;

Y
Yan Zheng 已提交
6462 6463 6464 6465 6466
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
6467 6468 6469 6470 6471 6472 6473 6474 6475 6476
		if (!btrfs_test_opt(root, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
Y
Yan Zheng 已提交
6477
	}
Y
Yan Zheng 已提交
6478 6479

	fs_devices = clone_fs_devices(fs_devices);
6480 6481
	if (IS_ERR(fs_devices))
		return fs_devices;
Y
Yan Zheng 已提交
6482

6483
	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6484
				   root->fs_info->bdev_holder);
6485 6486
	if (ret) {
		free_fs_devices(fs_devices);
6487
		fs_devices = ERR_PTR(ret);
Y
Yan Zheng 已提交
6488
		goto out;
6489
	}
Y
Yan Zheng 已提交
6490 6491 6492

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
Y
Yan Zheng 已提交
6493
		free_fs_devices(fs_devices);
6494
		fs_devices = ERR_PTR(-EINVAL);
Y
Yan Zheng 已提交
6495 6496 6497 6498 6499 6500
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
6501
	return fs_devices;
Y
Yan Zheng 已提交
6502 6503
}

6504
static int read_one_dev(struct btrfs_root *root,
6505 6506 6507
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
6508
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6509 6510 6511
	struct btrfs_device *device;
	u64 devid;
	int ret;
Y
Yan Zheng 已提交
6512
	u8 fs_uuid[BTRFS_UUID_SIZE];
6513 6514
	u8 dev_uuid[BTRFS_UUID_SIZE];

6515
	devid = btrfs_device_id(leaf, dev_item);
6516
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6517
			   BTRFS_UUID_SIZE);
6518
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
Y
Yan Zheng 已提交
6519 6520 6521
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6522 6523 6524
		fs_devices = open_seed_devices(root, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
Y
Yan Zheng 已提交
6525 6526
	}

6527
	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6528
	if (!device) {
Y
Yan Zheng 已提交
6529
		if (!btrfs_test_opt(root, DEGRADED))
Y
Yan Zheng 已提交
6530 6531
			return -EIO;

6532 6533 6534
		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
6535 6536
		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
				devid, dev_uuid);
6537 6538 6539 6540 6541
	} else {
		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if(!device->bdev && !device->missing) {
6542 6543 6544 6545 6546 6547
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
6548
			device->fs_devices->missing_devices++;
6549
			device->missing = 1;
Y
Yan Zheng 已提交
6550
		}
6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(device->missing);

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
Y
Yan Zheng 已提交
6565 6566 6567 6568 6569 6570 6571
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
6572
	}
6573 6574

	fill_device_from_item(leaf, dev_item, device);
6575
	device->in_fs_metadata = 1;
6576
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
Y
Yan Zheng 已提交
6577
		device->fs_devices->total_rw_bytes += device->total_bytes;
6578 6579 6580 6581 6582
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
6583 6584 6585 6586
	ret = 0;
	return ret;
}

Y
Yan Zheng 已提交
6587
int btrfs_read_sys_array(struct btrfs_root *root)
6588
{
6589
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6590
	struct extent_buffer *sb;
6591 6592
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
6593 6594
	u8 *array_ptr;
	unsigned long sb_array_offset;
6595
	int ret = 0;
6596 6597 6598
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
6599
	u32 cur_offset;
6600
	u64 type;
6601
	struct btrfs_key key;
6602

6603 6604 6605 6606 6607 6608 6609
	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6610 6611
	if (!sb)
		return -ENOMEM;
6612
	set_extent_buffer_uptodate(sb);
6613
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6614
	/*
6615
	 * The sb extent buffer is artificial and just used to read the system array.
6616
	 * set_extent_buffer_uptodate() call does not properly mark all it's
6617 6618 6619 6620 6621 6622 6623 6624 6625
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
6626
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6627
		SetPageUptodate(sb->pages[0]);
6628

6629
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6630 6631
	array_size = btrfs_super_sys_array_size(super_copy);

6632 6633 6634
	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;
6635

6636 6637
	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
6638 6639 6640 6641
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

6642 6643
		btrfs_disk_key_to_cpu(&key, disk_key);

6644 6645 6646
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
6647

6648
		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6649
			chunk = (struct btrfs_chunk *)sb_array_offset;
6650 6651 6652 6653 6654 6655 6656 6657 6658
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6659 6660 6661 6662 6663 6664 6665 6666
			if (!num_stripes) {
				printk(KERN_ERR
	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

6667 6668 6669 6670 6671 6672 6673 6674 6675
			type = btrfs_chunk_type(sb, chunk);
			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
				btrfs_err(root->fs_info,
			    "invalid chunk type %llu in sys_array at offset %u",
					type, cur_offset);
				ret = -EIO;
				break;
			}

6676 6677 6678 6679
			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

6680
			ret = read_one_chunk(root, &key, sb, chunk);
6681 6682
			if (ret)
				break;
6683
		} else {
6684 6685 6686
			printk(KERN_ERR
		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
6687 6688
			ret = -EIO;
			break;
6689
		}
6690 6691 6692
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
6693
	}
6694
	clear_extent_buffer_uptodate(sb);
6695
	free_extent_buffer_stale(sb);
6696
	return ret;
6697 6698 6699 6700

out_short_read:
	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
6701
	clear_extent_buffer_uptodate(sb);
6702
	free_extent_buffer_stale(sb);
6703
	return -EIO;
6704 6705 6706 6707 6708 6709 6710 6711 6712 6713
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
6714
	u64 total_dev = 0;
6715 6716 6717 6718 6719 6720 6721

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

6722 6723 6724
	mutex_lock(&uuid_mutex);
	lock_chunks(root);

6725 6726 6727 6728 6729
	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6730 6731 6732 6733 6734
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6735 6736
	if (ret < 0)
		goto error;
C
Chris Mason 已提交
6737
	while (1) {
6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6749 6750 6751
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
6752
						  struct btrfs_dev_item);
6753 6754 6755
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
6756
			total_dev++;
6757 6758 6759 6760
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
Y
Yan Zheng 已提交
6761 6762
			if (ret)
				goto error;
6763 6764 6765
		}
		path->slots[0]++;
	}
6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != root->fs_info->fs_devices->total_devices) {
		btrfs_err(root->fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(root->fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(root->fs_info->super_copy) <
	    root->fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(root->fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(root->fs_info->super_copy),
			  root->fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
6788 6789
	ret = 0;
error:
6790 6791 6792
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

Y
Yan Zheng 已提交
6793
	btrfs_free_path(path);
6794 6795
	return ret;
}
6796

6797 6798 6799 6800 6801
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

6802 6803 6804 6805 6806 6807 6808 6809
	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->dev_root = fs_info->dev_root;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
6810 6811
}

6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843
static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

6844 6845
		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

6892 6893
	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
6894 6895 6896 6897 6898 6899
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
6900 6901
		btrfs_warn_in_rcu(dev_root->fs_info,
			"error %d while searching for dev_stats item for device %s",
6902
			      ret, rcu_str_deref(device->name));
6903 6904 6905 6906 6907 6908 6909 6910
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
6911 6912
			btrfs_warn_in_rcu(dev_root->fs_info,
				"delete too small dev_stats item for device %s failed %d",
6913
				      rcu_str_deref(device->name), ret);
6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
6925 6926 6927
			btrfs_warn_in_rcu(dev_root->fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
6953
	int stats_cnt;
6954 6955 6956 6957
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6958
		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6959 6960
			continue;

6961
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6962 6963
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
6964
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6965 6966 6967 6968 6969 6970
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

6971 6972 6973 6974 6975 6976
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

6977
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6978
{
6979 6980
	if (!dev->dev_stats_valid)
		return;
6981 6982
	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6983
			   rcu_str_deref(dev->name),
6984 6985 6986
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6987 6988
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6989
}
6990

6991 6992
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
6993 6994 6995 6996 6997 6998 6999 7000
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

7001 7002
	btrfs_info_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

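/*
 * Called for the dev stats ioctl: copy the error counters of the requested
 * device into @stats and, when BTRFS_DEV_STATS_RESET is set in the flags,
 * reset the in-memory counters to zero.
 */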
int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

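/*
 * Wipe the btrfs magic from every super block copy on @bdev so the device
 * is no longer recognized as a btrfs member device, then notify udev and
 * touch the device node so libblkid re-probes it.
 */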
void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the commit size (commit_total_bytes) of all resized devices; this
 * is the size used when writing out the super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	lock_chunks(fs_info->dev_root);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	unlock_chunks(fs_info->dev_root);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
					struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	lock_chunks(root);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	unlock_chunks(root);
}

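/* Set the fs_info back pointer on fs_devices and every entry in its seed chain. */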
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

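/* Clear the fs_info back pointer again, walking the same seed chain. */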
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}

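/*
 * Close one device at unmount time: fix up the open/rw/missing counters and
 * replace the device in the list with a minimal copy (devid, uuid, name) so
 * that RCU readers keep seeing a valid entry until the old device is freed.
 */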
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device);
}