/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
	},
};

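/*
 * Illustrative note (added commentary, not in the original file): the
 * table above is indexed by the BTRFS_RAID_* enum values, so a caller
 * that knows the raid index can look up its constraints directly, e.g.:
 *
 *	int min = btrfs_raid_array[BTRFS_RAID_RAID10].devs_min;	(== 4)
 *	int copies = btrfs_raid_array[BTRFS_RAID_DUP].ncopies;	(== 2)
 */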
const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};

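/*
 * Example (added commentary, not in the original file): together the two
 * tables above translate a raid index into its on-disk block group flag
 * and into the error code reported when too few devices remain:
 *
 *	u64 flag = btrfs_raid_group[BTRFS_RAID_RAID1];
 *	int err  = btrfs_raid_mindev_error[BTRFS_RAID_RAID1];
 *
 * btrfs_check_raid_min_devices() below is the main consumer.
 */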
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

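/*
 * Usage sketch (added commentary, not in the original file): callers
 * follow the ERR_PTR convention rather than checking for NULL, and a
 * NULL fsid asks for a fresh random one:
 *
 *	fs_devices = alloc_fs_devices(NULL);
 *	if (IS_ERR(fs_devices))
 *		return PTR_ERR(fs_devices);
 */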
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

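/*
 * Usage sketch (added commentary, mirrors the callers below): open a
 * device exclusively and read its primary superblock in one step:
 *
 *	if (!btrfs_get_bdev_and_sb(path, flags | FMODE_EXCL, holder, 1,
 *				   &bdev, &bh)) {
 *		disk_super = (struct btrfs_super_block *)bh->b_data;
 *		...
 *		brelse(bh);
 *		blkdev_put(bdev, flags);
 *	}
 */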
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}


void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * Todo: This won't be enough. What if the same device
			 * comes back (with new uuid and) with its mapper path?
			 * But for now, this does help as mostly an admin will
			 * either use mapper or non mapper path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
						rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         a different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the
			 * one with the larger generation number or the
			 * last-in if generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}

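/*
 * Return convention illustrated (added commentary): scanning three
 * members of the same filesystem calls device_list_add() once per
 * device; the first call returns 1 (new fsid registered) and the later
 * calls return 0 (device already known).
 */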
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	if (device->bdev && device->writeable) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device);
}

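/*
 * Note (added commentary, not in the original file): the closed device
 * is not freed in place.  list_replace_rcu() swaps in a freshly
 * allocated placeholder so RCU readers walking fs_devices->devices
 * never see a dangling entry, and the old struct is released after a
 * grace period via call_rcu().
 */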
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

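/*
 * Usage sketch (added commentary, mirrors btrfs_scan_one_device() below):
 *
 *	if (!btrfs_read_disk_super(bdev, btrfs_sb_offset(0), &page,
 *				   &disk_super)) {
 *		devid = btrfs_stack_device_id(&disk_super->dev_item);
 *		btrfs_release_disk_super(page);
 *	}
 */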
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

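/*
 * Overlap cases handled above, illustrated (added commentary, not in the
 * original file): for a query range [start, end] and a dev extent
 * [key.offset, extent_end):
 *  - extent covers the whole range -> count end - start + 1 and stop
 *  - extent overlaps only the head -> count extent_end - start
 *  - extent lies fully inside      -> count extent_end - key.offset
 *  - extent overlaps only the tail -> count end - key.offset + 1, stop
 */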
static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}


/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space that we find; if we don't
 *		  find suitable free space, it stores the start position of
 *		  the max free space instead
 * @len:	  the size of the free space that we find, or the size of the
 *		  max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}

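/*
 * Contract illustrated (added commentary, not in the original file):
 *
 *	ret = find_free_dev_extent(trans, device, num_bytes, &start, &len);
 *
 * ret == 0 means [start, start + num_bytes) is a usable hole, while
 * ret == -ENOSPC means start/len describe the largest hole found.
 */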
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

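/*
 * Note (added commentary, not in the original file):
 * btrfs_alloc_dev_extent() and btrfs_free_dev_extent() maintain the
 * per-device half of a chunk mapping; each stripe of a chunk owns one
 * dev extent item keyed by (devid, BTRFS_DEV_EXTENT_KEY, physical start).
 */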
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

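/*
 * Example (added commentary): if the last entry in the mapping tree
 * covers logical [16MiB, 24MiB), find_next_chunk() returns 24MiB, the
 * first unused logical address for a new chunk.
 */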
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

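/*
 * Example (added commentary): with dev items present for devid 1 and 2,
 * the backwards search above lands on the largest devid and
 * find_next_devid() hands back 3; an empty tree yields devid 1.
 */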
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}

struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
					struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
			!next_device->missing && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device, in the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

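/*
 * Usage sketch (added commentary, mirrors btrfs_rm_device() below): when
 * removing a device, repoint sb->s_bdev and latest_bdev at a surviving
 * device before the old one is dropped:
 *
 *	btrfs_assign_next_active_device(root->fs_info, device, NULL);
 */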
1838
int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
1839 1840
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;
	char *dev_name = NULL;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(root, devid, device_path,
				&device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		dev_name = kstrdup(device->name->str, GFP_KERNEL);
		if (!dev_name) {
			ret = -ENOMEM;
			goto error_undo;
		}
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	btrfs_assign_next_active_device(root->fs_info, device, NULL);

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	}

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		struct block_device *bdev;

		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
						root->fs_info->bdev_holder);
		if (!IS_ERR(bdev)) {
			btrfs_scratch_superblocks(bdev, dev_name);
			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
		}
	}

out:
	kfree(dev_name);

	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto out;
}

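/*
 * Unlink the replace source device from its fs_devices and fix up the
 * device counters; the caller must hold device_list_mutex (asserted
 * below).
 */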
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * In the case of an fs with no seed, srcdev->fs_devices will point
	 * to the fs_devices of fs_info. However, when the dev being replaced
	 * is a seed dev it will point to the seed's local fs_devices. In
	 * short, srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable)
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (srcdev->writeable) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}
	call_rcu(&srcdev->rcu, free_device);

	/*
	 * unless fs_devices is a seed fs, num_devices shouldn't go
	 * to zero
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devs left we'd rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	mutex_lock(&uuid_mutex);
	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_info->fs_devices->open_devices--;

	fs_info->fs_devices->num_devices--;

	btrfs_assign_next_active_device(fs_info, tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks() may lead
	 * to a call to btrfs_show_devname(), which will try to hold
	 * device_list_mutex. Here the device is already off the device
	 * list, so we don't have to hold device_list_mutex.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
	call_rcu(&tgtdev->rcu, free_device);
}

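/*
 * Open the block device at @device_path, read the superblock from it and
 * look up the matching btrfs_device by devid and uuid.  Returns -ENOENT
 * when the device does not belong to this filesystem.
 */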
static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device)
			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
					 char *devpath,
					 struct btrfs_device **device)
{
	int ret;

	if (devid) {
		ret = 0;
		*device = btrfs_find_device(root->fs_info, devid, NULL,
					    NULL);
		if (!*device)
			ret = -ENOENT;
	} else {
		if (!devpath || !devpath[0])
			return -EINVAL;

		ret = btrfs_find_device_missing_or_by_path(root, devpath,
							   device);
	}
	return ret;
}

/*
 * Does all the dirty work required for changing the file system's UUID:
 * the current devices are handed off to a new seed fs_devices struct and
 * the mounted fs gets a freshly generated fsid.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = __alloc_fs_devices();
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	lock_chunks(root);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	unlock_chunks(root);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

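/*
 * Add a new device to a mounted filesystem: allocate and initialize the
 * btrfs_device, hook it into the in-memory lists and superblock counters
 * and, when the fs is a seed, sprout a new writable filesystem on top of
 * it.
 */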
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 tmp;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 0;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	lock_chunks(root);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    tmp + device->total_bytes);

	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    tmp + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		lock_chunks(root);
		ret = init_first_rw_device(trans, root, device);
		unlock_chunks(root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	ret = btrfs_add_device(trans, root, device);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto error_trans;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}

		/*
		 * Sprouting would change the fsid of the mounted root,
		 * so rename the fsid in sysfs.
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
						root->fs_info->fsid);
		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
								fsid_buf))
			btrfs_warn(root->fs_info,
				"sysfs: failed to create fsid for sprout");
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_handle_fs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_trans:
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

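/*
 * Create the target device for a device replace operation: the new device
 * mirrors the source device's sizes and carries the fixed devid
 * BTRFS_DEV_REPLACE_DEVID until the copy finishes.
 */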
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
				  struct btrfs_device *srcdev,
				  struct btrfs_device **device_out)
{
	struct request_queue *q;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *devices;
	struct rcu_string *name;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (fs_info->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info, "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (i_size_read(bdev->bd_inode) <
	    btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info, "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	device->writeable = 1;
	device->generation = 0;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	ASSERT(list_empty(&srcdev->resized_list));
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->dev_root = fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 1;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}

void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
					      struct btrfs_device *tgtdev)
{
	WARN_ON(fs_info->fs_devices->rw_devices == 0);
	tgtdev->io_width = fs_info->dev_root->sectorsize;
	tgtdev->io_align = fs_info->dev_root->sectorsize;
	tgtdev->sector_size = fs_info->dev_root->sectorsize;
	tgtdev->dev_root = fs_info->dev_root;
	tgtdev->in_fs_metadata = 1;
}

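/*
 * Write the in-memory device state (io parameters, total and used bytes)
 * back into the device item in the chunk tree.
 */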
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

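/*
 * Grow @device to @new_size, updating the superblock total and queueing
 * the device on the resized list so the new size is committed to disk.
 * Must be called with a running transaction, as in the balance loop in
 * __btrfs_balance() below:
 *
 *	trans = btrfs_start_transaction(dev_root, 0);
 *	ret = btrfs_grow_device(trans, device, old_size);
 */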
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	struct btrfs_fs_devices *fs_devices;
	u64 old_total;
	u64 diff;

	if (!device->writeable)
		return -EACCES;

	lock_chunks(device->dev_root);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = new_size - device->total_bytes;

	if (new_size <= device->total_bytes ||
	    device->is_tgtdev_for_dev_replace) {
		unlock_chunks(device->dev_root);
		return -EINVAL;
	}

	fs_devices = device->dev_root->fs_info->fs_devices;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->dev_root->fs_info);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_devices->resized_devices);
	unlock_chunks(device->dev_root);

	return btrfs_update_device(trans, device);
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

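/*
 * Remove the entry matching (@chunk_objectid, @chunk_offset) from the
 * sys_chunk_array embedded in the superblock, compacting the array in
 * place.
 */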
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	unlock_chunks(root);
	return ret;
}

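/*
 * Delete the chunk at @chunk_offset: free each stripe's dev extent,
 * delete the chunk item (plus its sys_chunk_array copy for SYSTEM
 * chunks) and finally remove the block group itself.
 */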
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	/* Just in case */
	root = root->fs_info->chunk_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em || em->start > chunk_offset ||
	    em->start + em->len < chunk_offset) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		if (em)
			free_extent_map(em);
		return -EINVAL;
	}
	map = em->map_lookup;
	lock_chunks(root->fs_info->chunk_root);
	check_system_chunk(trans, extent_root, map->type);
	unlock_chunks(root->fs_info->chunk_root);

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			lock_chunks(root);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			spin_lock(&root->fs_info->free_chunk_lock);
			root->fs_info->free_chunk_space += dev_extent_len;
			spin_unlock(&root->fs_info->free_chunk_lock);
			btrfs_clear_space_info_full(root->fs_info);
			unlock_chunks(root);
		}

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			if (ret) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}

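/*
 * Relocate all extents of the chunk at @chunk_offset and then delete the
 * chunk via btrfs_remove_chunk().  The caller must hold
 * delete_unused_bgs_mutex (asserted below).
 */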
static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
{
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	int ret;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(root);
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	btrfs_scrub_continue(root);
	if (ret)
		return ret;

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, root, chunk_offset);
	btrfs_end_transaction(trans, root);
	return ret;
}

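/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk; chunks
 * that fail with ENOSPC are retried once after the others have moved.
 */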
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

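/*
 * Persist the balance control args as an item under BTRFS_BALANCE_OBJECTID
 * so that an interrupted balance can be resumed after a remount.
 */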
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
					bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
					bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

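/*
 * Run the balance filters configured for the chunk's type (data,
 * metadata or system) against the chunk at @chunk_offset; returns 1 if
 * the chunk should be relocated, 0 if it should be skipped.
 */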
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

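/*
 * The main balance loop.  Step one shrinks and regrows each device to
 * make some room; step two walks the chunk tree backwards twice, first
 * counting the chunks that pass the filters, then relocating them.
 */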
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * The single value limit and the min/max limits use the same bytes
	 * in the balance args, so save the configured values here and
	 * restore them for the relocation pass after counting.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;
	u64 bytes_used = 0;

	/* step one, make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = btrfs_device_get_total_bytes(device);
		size_to_free = div_factor(old_size, 1);
		size_to_free = min_t(u64, size_to_free, SZ_1M);
		if (!device->writeable ||
		    btrfs_device_get_total_bytes(device) -
		    btrfs_device_get_bytes_used(device) > size_to_free ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * Restore the limits; the single value limit and the
		 * min/max limits use the same bytes in the balance args.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		ASSERT(fs_info->data_sinfo);
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
		    !chunk_reserved && !bytes_used) {
			trans = btrfs_start_transaction(chunk_root, 0);
			if (IS_ERR(trans)) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				ret = PTR_ERR(trans);
				goto error;
			}

			ret = btrfs_force_chunk_alloc(trans, chunk_root,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans, chunk_root);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			}
			chunk_reserved = 1;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

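/*
 * Decide whether the balance state should be torn down: either a cancel
 * was requested, or the balance finished normally (neither a pause nor a
 * cancel is pending).
 */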
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

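/*
 * Tear down the in-memory balance control, delete the balance item from
 * the tree root and drop the exclusive operation flag.
 */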
static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	int ret;

	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);

	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
}

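/*
 * A convert target must be a single valid profile bit that is also
 * contained in the allowed mask computed from the device count in
 * btrfs_balance() below; e.g. a convert to RAID10 is rejected on a
 * filesystem with fewer than four devices.
 */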
/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}

/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	"with mixed groups data and metadata balance options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		BUG_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
	if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if (validate_convert_profile(&bctl->data, allowed)) {
		btrfs_err(fs_info,
			  "unable to start balance with target data profile %llu",
			  bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		btrfs_err(fs_info,
			  "unable to start balance with target metadata profile %llu",
			  bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		btrfs_err(fs_info,
			  "unable to start balance with target system profile %llu",
			  bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed))) {
			if (bctl->flags & BTRFS_BALANCE_FORCE) {
				btrfs_info(fs_info, "force reducing metadata integrity");
			} else {
				btrfs_err(fs_info,
	"balance will reduce metadata integrity, use force if you want this");
				ret = -EINVAL;
				goto out;
			}
		}
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
		btrfs_warn(fs_info,
	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
			bctl->meta.target, bctl->data.target);
	}

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures = min(
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
			btrfs_get_num_tolerated_disk_barrier_failures(
				bctl->sys.target));
	}

	ret = insert_balance_item(fs_info->tree_root, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		set_balance_control(bctl);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	atomic_inc(&fs_info->balance_running);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	atomic_dec(&fs_info->balance_running);

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures =
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	}

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, 0, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		__cancel_balance(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
	else {
		kfree(bctl);
		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
	}
	return ret;
}

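/* kthread entry point used to resume an interrupted balance after mount */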
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	if (fs_info->balance_ctl) {
		btrfs_info(fs_info, "continuing balance");
		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);

	return ret;
}

int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	spin_lock(&fs_info->balance_lock);
	if (!fs_info->balance_ctl) {
		spin_unlock(&fs_info->balance_lock);
		return 0;
	}
	spin_unlock(&fs_info->balance_lock);

	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
		btrfs_info(fs_info, "force skipping balance");
		return 0;
	}

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

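/*
 * Read the balance item stored in the tree root and recreate the
 * in-memory balance control from it, so a balance interrupted by an
 * unmount or crash can be resumed.
 */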
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->fs_info = fs_info;
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	set_balance_control(bctl);

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

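/*
 * Ask a running balance to pause and wait until the relocation loop has
 * noticed the request; the balance item stays on disk so the operation
 * can be resumed later.
 */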
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (atomic_read(&fs_info->balance_running)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(atomic_read(&fs_info->balance_running));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	if (fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (atomic_read(&fs_info->balance_running)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);
		mutex_lock(&fs_info->balance_mutex);
	} else {
		/* __cancel_balance needs volume_mutex */
		mutex_unlock(&fs_info->balance_mutex);
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl)
			__cancel_balance(fs_info);

		mutex_unlock(&fs_info->volume_mutex);
	}

	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

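/*
 * Walk all root items and add uuid tree entries for each subvolume's
 * uuid and received_uuid that is not already recorded.
 */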
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_key max_key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	max_key.objectid = (u64)-1;
	max_key.type = BTRFS_ROOT_ITEM_KEY;
	max_key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, 0);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
4156
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
S
Stefan Behrens 已提交
4157 4158 4159 4160 4161
					ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fs_info->uuid_root);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		fs_info->update_uuid_tree_gen = 1;
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}

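/*
 * Create the uuid tree and kick off a background scan that populates it
 * with entries for all existing subvolumes.
 */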
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, tree_root, ret);
		btrfs_end_transaction(trans, tree_root);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans, tree_root);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff = old_size - new_size;

	if (device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;

	lock_chunks(root);

	btrfs_device_set_total_bytes(device, new_size);
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_offset);
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not completed yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we cannot use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			unlock_chunks(root);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &root->fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	if (ret) {
		lock_chunks(root);
		btrfs_device_set_total_bytes(device, old_size);
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
	}
	return ret;
}

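/*
 * Append the chunk item of a SYSTEM chunk to the sys_chunk_array in the
 * superblock, so the chunk tree itself can be bootstrapped at mount time.
 */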
static int btrfs_add_system_chunk(struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		unlock_chunks(root);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	unlock_chunks(root);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO allow them to set a preferred stripe size */
	return SZ_64K;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

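/*
 * First phase of chunk allocation: pick a set of devices and a stripe
 * geometry for the requested chunk type and insert the mapping into the
 * in-memory chunk map; the on-disk chunk item is written later by
 * btrfs_finish_chunk_alloc().
 */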
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, u64 start,
			       u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	index = __get_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = 10 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
		       type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!device->in_fs_metadata ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = num_stripes / ncopies;

	if (type & BTRFS_BLOCK_GROUP_RAID5) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
						extent_root->stripesize);
		data_stripes = num_stripes - 1;
	}
	if (type & BTRFS_BLOCK_GROUP_RAID6) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
						extent_root->stripesize);
		data_stripes = num_stripes - 2;
	}

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		u64 mask = (1ULL << 24) - 1;

		stripe_size = div_u64(max_chunk_size, data_stripes);

		/* bump the answer up to a 16MB boundary */
		stripe_size = (stripe_size + mask) & ~mask;

		/* but don't go higher than the limits we found
		 * while searching for free extents
		 */
		if (stripe_size > devices_info[ndevs-1].max_avail)
			stripe_size = devices_info[ndevs-1].max_avail;
	}

	stripe_size = div_u64(stripe_size, dev_stripes);

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = div_u64(stripe_size, raid_stripe_len);
	stripe_size *= raid_stripe_len;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = raid_stripe_len;
	map->io_align = raid_stripe_len;
	map->io_width = raid_stripe_len;
	map->type = type;
	map->sub_stripes = sub_stripes;

	num_bytes = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (!ret) {
		list_add_tail(&em->list, &trans->transaction->pending_chunks);
		atomic_inc(&em->refs);
	}
	write_unlock(&em_tree->lock);
	if (ret) {
		free_extent_map(em);
		goto error;
	}

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	free_extent_map(em);
	check_raid56_incompat_flag(extent_root->fs_info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
	/* One for the pending_chunks list reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

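/*
 * Second phase of chunk allocation: write the chunk item, the per-device
 * extent items and the updated device items for a chunk reserved earlier
 * by __btrfs_alloc_chunk().
 */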
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(extent_root->fs_info,
			   "unable to find logical %Lu len %Lu",
			   chunk_offset, chunk_size);
		return -EINVAL;
	}

	if (em->start != chunk_offset || em->len != chunk_size) {
		btrfs_crit(extent_root->fs_info,
			   "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
			   chunk_offset, chunk_size, em->start, em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device,
					     chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     chunk_offset, dev_offset,
					     stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does works
 * that make the new allocated chunk useable, but not do any operation
 * that modifies the chunk tree. The second part does the works that
 * require modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;

	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
	chunk_offset = find_next_chunk(extent_root->fs_info);
	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
				  alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(root->fs_info);
	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
				  alloc_profile);
	return ret;
}

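/*
 * Number of device failures a chunk with this profile can tolerate while
 * remaining readable: 1 for RAID1/RAID10/RAID5/DUP, 2 for RAID6, 0 for
 * the other profiles.
 */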
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
5034
	}

	return max_errors;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->missing) {
			miss_ndevs++;
			continue;
		}

		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

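/*
 * Return how many copies of the data at @logical can be read: the stripe
 * count for DUP/RAID1, sub_stripes for RAID10, 2 for RAID5 and 3 for
 * RAID6 (data plus parity reconstruction), otherwise 1.
 */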
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);

	/*
	 * We could return errors for these cases, but that could get ugly and
	 * we'd probably do the same thing which is just not do anything else
	 * and exit, so return 1 so the callers don't try to use other copies.
	 */
	if (!em) {
		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
			    logical+len);
		return 1;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			    "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
			    logical, logical+len, em->start,
			    em->start + em->len);
		free_extent_map(em);
		return 1;
	}

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	free_extent_map(em);

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
		ret++;
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
				    struct btrfs_mapping_tree *map_tree,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	unsigned long len = root->sectorsize;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		len = map->stripe_len * nr_data_stripes(map);
	free_extent_map(em);
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
			   u64 logical, u64 len, int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		ret = 1;
	free_extent_map(em);
	return ret;
}

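/*
 * Pick a readable mirror, preferring @optimal but skipping stripes whose
 * device is missing and, while a device replace runs in "avoid" mode,
 * the replace source device unless it is the only remaining choice.
 */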
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first, int num,
			    int optimal, int dev_replace_is_ongoing)
{
	int i;
	int tolerance;
	struct btrfs_device *srcdev;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[optimal].dev->bdev &&
		    (tolerance || map->stripes[optimal].dev != srcdev))
			return optimal;
		for (i = first; i < first + num; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}

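/*
 * Allocate a btrfs_bio with trailing space for the stripes, the target
 * device indexes and the raid map in a single allocation; __GFP_NOFAIL
 * keeps the callers' error handling simple.
 */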
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	atomic_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!atomic_read(&bbio->refs));
	atomic_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (atomic_dec_and_test(&bbio->refs))
		kfree(bbio);
}

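/*
 * Map a logical range to the physical stripes it lives on, taking the
 * RAID profile, the mirror number and any ongoing device replace into
 * account; when @bbio_ret is NULL only the mapped length is computed
 * (for bio merging).
 */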
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	u64 stripe_len;
	u32 stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
			logical, *length);
		return -EINVAL;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
			   "found %Lu-%Lu", logical, em->start,
			   em->start + em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	offset = logical - em->start;

	stripe_len = map->stripe_len;
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(stripe_nr, stripe_len);

	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info, "stripe math has gone wrong, "
			   "stripe_offset=%llu, offset=%llu, start=%llu, "
			   "logical=%llu, stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		free_extent_map(em);
		return -EINVAL;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	/* if we're here for raid56, we need to know the stripe aligned start */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;

		/* allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
				full_stripe_len);
		raid56_full_stripe_start *= full_stripe_len;
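		/*
		 * Worked example (hypothetical numbers): RAID5 over four
		 * devices with 64K stripe_len has nr_data_stripes = 3, so
		 * full_stripe_len is 192K; an offset of 200K rounds down
		 * to raid56_full_stripe_start = 192K.
		 */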
	}

	if (rw & REQ_DISCARD) {
		/* we don't discard raid56 yet */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		*length = min_t(u64, em->len - offset, *length);
	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/* For writes to RAID[56], allow a full stripeset across all disks.
		   For other RAID types and for RAID[56] reads, just allow a single
		   stripe (on a single disk). */
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
		    (rw & REQ_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
	} else {
		*length = em->len - offset;
	}
	/* This is for when we're called from btrfs_merge_bio_hook() and all
	   it cares about is the length */
	if (!bbio_ret)
		goto out;

	btrfs_dev_replace_lock(dev_replace, 0);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (!dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace, 0);
	else
		btrfs_dev_replace_set_lock_blocking(dev_replace);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
	    dev_replace->tgtdev != NULL) {
		/*
		 * in dev-replace case, for repair case (that's the only
		 * case where the mirror is selected explicitly when
		 * calling btrfs_map_block), blocks left of the left cursor
		 * can also be read from the target drive.
		 * For REQ_GET_READ_MIRRORS, the target drive is added as
		 * the last one to the array of stripes. For READ, it also
		 * needs to be supported using the same mirror number.
		 * If the requested block is not left of the left cursor,
		 * EIO is returned. This can happen because btrfs_num_copies()
		 * returns one more in the dev-replace case.
		 */
		u64 tmp_length = *length;
		struct btrfs_bio *tmp_bbio = NULL;
		int tmp_num_stripes;
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
			     logical, &tmp_length, &tmp_bbio, 0, 0);
		if (ret) {
			WARN_ON(tmp_bbio != NULL);
			goto out;
		}

		tmp_num_stripes = tmp_bbio->num_stripes;
		if (mirror_num > tmp_num_stripes) {
			/*
			 * REQ_GET_READ_MIRRORS does not contain this
			 * mirror, that means that the requested area
			 * is not left of the left cursor
			 */
			ret = -EIO;
			btrfs_put_bbio(tmp_bbio);
			goto out;
		}

		/*
		 * process the rest of the function using the mirror_num
		 * of the source drive. Therefore look it up first.
		 * At the end, patch the device pointer to the one of the
		 * target drive.
		 */
		for (i = 0; i < tmp_num_stripes; i++) {
			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
				continue;

			/*
			 * In case of DUP, in order to keep it simple, only add
			 * the mirror with the lowest physical address
			 */
			if (found &&
			    physical_of_found <= tmp_bbio->stripes[i].physical)
				continue;

			index_srcdev = i;
			found = 1;
			physical_of_found = tmp_bbio->stripes[i].physical;
		}

		btrfs_put_bbio(tmp_bbio);

		if (!found) {
			WARN_ON(1);
			ret = -EIO;
			goto out;
		}

		mirror_num = index_srcdev + 1;
		patch_the_first_stripe_for_dev_replace = 1;
		physical_to_patch_in_first_stripe = physical_of_found;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);

	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;
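		/*
		 * Worked example (hypothetical numbers): num_stripes = 4
		 * and sub_stripes = 2 give factor = 2.  An incoming
		 * stripe_nr of 5 becomes stripe_nr = 2 with remainder 1,
		 * so stripe_index = 1 * 2 = 2: the block sits in the
		 * second mirror pair, two full stripes down.
		 */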

		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map &&
		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
		     mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!(rw & (REQ_WRITE | REQ_DISCARD |
				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info, "stripe index math went horribly wrong, "
			   "got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_alloc_stripes <<= 1;
		if (rw & REQ_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
	    mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

	if (rw & REQ_DISCARD) {
		u32 factor = 0;
		u32 sub_stripes = 0;
		u64 stripes_per_dev = 0;
		u32 remaining_stripes = 0;
		u32 last_stripe = 0;

		if (map->type &
		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
				sub_stripes = 1;
			else
				sub_stripes = map->sub_stripes;

			factor = map->num_stripes / sub_stripes;
			stripes_per_dev = div_u64_rem(stripe_nr_end -
						      stripe_nr_orig,
						      factor,
						      &remaining_stripes);
			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
			last_stripe *= sub_stripes;
		}

		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;

				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;

				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
					bbio->stripes[i].length -=
						stripe_end_offset;

				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	tgtdev_indexes = 0;
	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;
	bbio->num_tgtdevs = tgtdev_indexes;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		btrfs_dev_replace_clear_lock_blocking(dev_replace);
		btrfs_dev_replace_unlock(dev_replace, 0);
	}
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, 0);
}
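
/*
 * Usage sketch (illustrative only, not called anywhere): resolve the
 * preferred mirror of a logical range and inspect the first stripe.
 */
static int __maybe_unused btrfs_map_block_example(struct btrfs_fs_info *fs_info,
						  u64 logical, u64 len)
{
	struct btrfs_bio *bbio = NULL;
	int ret;

	ret = btrfs_map_block(fs_info, READ, logical, &len, &bbio, 0);
	if (ret)
		return ret;
	/* bbio->stripes[0] now names a device and a physical offset */
	btrfs_put_bbio(bbio);
	return 0;
}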

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret, int mirror_num,
		     int need_raid_map)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, need_raid_map);
}

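/*
 * Reverse mapping: given a chunk start and a physical offset on one
 * device, compute the logical address(es) that map there.  Used e.g.
 * to find the logical positions of the superblock mirrors inside a
 * block group.
 */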
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
		       chunk_start);
		return -EIO;
	}

	if (em->start != chunk_start) {
		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
	map = em->map_lookup;

	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}

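/*
 * Per-stripe completion handler: account any error against the right
 * device, and once every stripe of the bbio has finished, complete the
 * original bio, reporting success iff the error count stayed within
 * bbio->max_errors.
 */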
static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_error) {
		atomic_inc(&bbio->error);
		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_error = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_error = 0;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (device->missing || !device->bdev) {
		bio_io_error(bio);
		return;
	}

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(root->fs_info->submit_workers,
				 &device->work);
}

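/*
 * Attach one stripe's identity to a (possibly cloned) bio and send it
 * down: record the stripe index for error accounting, point the bio at
 * the stripe's device and physical sector, then submit directly or via
 * the async submit worker.
 */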
static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_iter.bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;

	btrfs_bio_counter_inc_noblocked(root->fs_info);

	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}
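
/*
 * Fail one stripe without submitting it: count the error and, if this
 * was the last outstanding stripe of the bbio, complete the original
 * bio with -EIO.
 */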

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		bio->bi_error = -EIO;
		btrfs_end_bbio(bbio, bio);
	}
}

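/*
 * Top-level I/O fan-out: map the bio's logical range onto stripes,
 * then clone and submit one bio per stripe.  RAID5/6 requests are
 * handed off to the raid56 layer, which manages parity itself.
 */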
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(root->fs_info);
	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
			      mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = root->fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((rw & WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (rw & WRITE) {
			ret = raid56_parity_write(root, bio, bbio, map_length);
		} else {
			ret = raid56_parity_recover(root, bio, bbio, map_length,
						    mirror_num, 1);
		}

		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	if (map_length < length) {
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
			logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else
			bio = first_bio;

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
	}
	btrfs_bio_counter_dec(root->fs_info);
	return 0;
}

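/*
 * Look up a device by devid (and optionally uuid/fsid), searching the
 * current fs_devices and any seed filesystems chained behind it.
 */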
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}

/* Return -EIO if any error, otherwise return 0. */
static int btrfs_check_chunk_valid(struct btrfs_root *root,
				   struct extent_buffer *leaf,
				   struct btrfs_chunk *chunk, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	if (!num_stripes) {
		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, root->sectorsize)) {
		btrfs_err(root->fs_info,
			  "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
		btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
			  btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
		btrfs_err(root->fs_info,
			"invalid chunk length %llu", length);
		return -EIO;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		btrfs_err(root->fs_info,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}
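
/*
 * Turn one chunk item into an in-memory extent_map + map_lookup,
 * resolving each stripe's devid/uuid to a btrfs_device (adding
 * placeholder "missing" devices when mounted degraded).
 */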

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 stripe_len;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
	if (ret)
		return ret;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, root->fs_info->fs_devices,
						devid, uuid);
			if (!map->stripes[i].dev) {
				free_extent_map(em);
				return -EIO;
			}
			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
						devid, uuid);
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

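/*
 * Find (or, when mounted degraded, fabricate) the fs_devices of the
 * seed filesystem with the given fsid, opening its devices read-only
 * and chaining it onto the sprout's fs_devices->seed list.
 */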
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		if (!btrfs_test_opt(root, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

6523
				   root->fs_info->bdev_holder);
6524 6525
	if (ret) {
		free_fs_devices(fs_devices);
6526
		fs_devices = ERR_PTR(ret);
Y
Yan Zheng 已提交
6527
		goto out;
6528
	}
Y
Yan Zheng 已提交
6529 6530 6531

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
Y
Yan Zheng 已提交
6532
		free_fs_devices(fs_devices);
6533
		fs_devices = ERR_PTR(-EINVAL);
Y
Yan Zheng 已提交
6534 6535 6536 6537 6538 6539
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
6540
	return fs_devices;
Y
Yan Zheng 已提交
6541 6542
}

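/*
 * Process one dev item from the chunk tree: locate the matching
 * btrfs_device (following seed filesystems when the fsid differs),
 * create a "missing" placeholder when mounted degraded, and fill in
 * the on-disk sizes and uuid.
 */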
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		fs_devices = open_seed_devices(root, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
				devid, dev_uuid);
	} else {
		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device->bdev && !device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			device->missing = 1;
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(device->missing);

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}

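/*
 * Read the bootstrap chunks embedded in the superblock's
 * sys_chunk_array, which are needed before the chunk tree itself can
 * be located.  The array is wrapped in a dummy extent buffer so the
 * regular extent-buffer accessors can parse it.
 */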
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * set_extent_buffer_uptodate() call does not properly mark all its
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(KERN_ERR
	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			type = btrfs_chunk_type(sb, chunk);
			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
				btrfs_err(root->fs_info,
			    "invalid chunk type %llu in sys_array at offset %u",
					type, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
		} else {
			printk(KERN_ERR
		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6769 6770 6771 6772 6773
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != root->fs_info->fs_devices->total_devices) {
		btrfs_err(root->fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(root->fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(root->fs_info->super_copy) <
	    root->fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(root->fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(root->fs_info->super_copy),
			  root->fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->dev_root = fs_info->dev_root;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
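
/*
 * Write one device's in-memory error counters back to its dev_stats
 * item in the device tree, replacing an undersized item from an older
 * format when necessary.
 */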

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(dev_root->fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(dev_root->fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(dev_root->fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6997
		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6998 6999
			continue;

7000
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7001 7002
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
7003
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7004 7005 7006 7007 7008 7009
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

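/* Rate-limited dump of all error counters; silent until stats are loaded. */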
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

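/*
 * Back end of the BTRFS_IOC_GET_DEV_STATS ioctl: copy (and, when
 * BTRFS_DEV_STATS_RESET is set in stats->flags, clear) the error counters
 * of the device given by stats->devid into stats->values.
 *
 * A minimal userspace sketch (assuming a file descriptor fd open on the
 * mounted filesystem; names other than the BTRFS_* constants are
 * illustrative):
 *
 *	struct btrfs_ioctl_get_dev_stats s = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &s) == 0)
 *		printf("wr errs: %llu\n",
 *		       (unsigned long long)s.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */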
int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

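/*
 * Zero the magic of every superblock copy on @bdev so the device stops
 * being detected as btrfs, then notify udev and refresh the device node
 * timestamps for libblkid.
 */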
void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	lock_chunks(fs_info->dev_root);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	unlock_chunks(fs_info->dev_root);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
					struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* Serialize with the device replace finish process */
	lock_chunks(root);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	unlock_chunks(root);
}

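/*
 * Walk the seed device chain and point each fs_devices at @fs_info; the
 * reset variant below clears the pointers again.
 */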
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}