/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

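/*
 * Track a device reported by a scan in the global fs_uuids list.  The
 * btrfs_fs_devices entry for its fsid is created on first sight; later
 * scans of the same device only refresh its name and the latest
 * devid/generation hints.
 */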
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

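/*
 * Make an in-memory copy of an fs_devices list.  Used when sprouting a
 * seed filesystem, so the seed's devices stay listed under the old fsid.
 */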
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

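/*
 * Release devices that were scanned but are not part of the filesystem
 * being mounted, and remember the device with the newest generation as
 * latest_bdev.
 */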
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

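/*
 * Close all devices once the last opener of this fs_devices is gone.
 * Each device is swapped for a cleaned-up copy on the list and the old
 * structure (with its bdev) is released via call_rcu().
 */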
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(device->name && !name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

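/*
 * Open every member device: read its super block to verify the
 * devid/uuid, note the device with the newest generation as
 * latest_bdev and collect the rw/seeding/discard state.
 */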
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name->str, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name->str);
			goto error;
		}
		filemap_write_and_wait(bdev->bd_inode->i_mapping);
		invalidate_bdev(bdev);
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

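/*
 * Read the super block of the device at @path and register it with
 * device_list_add(), so a later mount can find all members of the
 * filesystem it belongs to.
 */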
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;
	u64 total_devices;

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	mutex_lock(&uuid_mutex);
	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;
	brelse(bh);
error_close:
	mutex_unlock(&uuid_mutex);
	blkdev_put(bdev, flags);
error:
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 * 		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

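/*
 * Remove the dev extent item that covers @start on @device and return
 * the space to the device's free_chunk accounting.
 */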
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

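/*
 * Insert a dev extent item describing the chunk stripe placed at
 * [@start, @start + @num_bytes) on @device.
 */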
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

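/*
 * Find the offset just past the highest chunk that exists for
 * @objectid, so the caller knows where the next chunk mapping can go.
 */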
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

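/*
 * Return the next free device id: one more than the highest devid
 * recorded in the chunk tree.
 */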
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

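/*
 * Delete the device item of @device from the chunk tree.  Runs in its
 * own transaction and commits it before returning.
 */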
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

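/*
 * Remove a device from a mounted filesystem: its chunks are relocated
 * by btrfs_shrink_device(device, 0), its items are deleted from the
 * trees and, unless it was the "missing" placeholder, the super block
 * magic is wiped so the disk is no longer detected as btrfs.
 */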
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		invalidate_bdev(bdev);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->total_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

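/*
 * Add a new device to a mounted filesystem.  A seed filesystem is
 * sprouted (given a fresh fsid) first; the device is then wired into
 * the device lists, the chunk tree and the superblock counters.
 */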
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret)
			goto error_trans;
		ret = btrfs_finish_sprout(trans, root);
		if (ret)
			goto error_trans;
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret)
			goto error_trans;
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

1800
	unlock_chunks(root);
1801
	ret = btrfs_commit_transaction(trans, root);
1802

Y
Yan Zheng 已提交
1803 1804 1805
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
1806

1807 1808 1809
		if (ret) /* transaction commit */
			return ret;

Y
Yan Zheng 已提交
1810
		ret = btrfs_relocate_sys_chunks(root);
1811 1812 1813 1814 1815
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
Y
Yan Zheng 已提交
1816
	}
1817

Y
Yan Zheng 已提交
1818
	return ret;
1819 1820 1821 1822 1823

error_trans:
	unlock_chunks(root);
	btrfs_abort_transaction(trans, root, ret);
	btrfs_end_transaction(trans, root);
1824
	rcu_string_free(device->name);
1825
	kfree(device);
Y
Yan Zheng 已提交
1826
error:
1827
	blkdev_put(bdev, FMODE_EXCL);
Y
Yan Zheng 已提交
1828 1829 1830 1831
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
1832
	return ret;
1833 1834
}

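/*
 * Write the in-memory state of @device back into its dev item in the
 * chunk tree.  Note that the item's total_bytes is taken from
 * disk_total_bytes, so a size change only becomes visible on disk once
 * disk_total_bytes has been updated as well.
 */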
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

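/*
 * Locked wrapper around __btrfs_grow_device(); the chunk mutex keeps
 * the size and superblock updates from racing with other chunk-level
 * operations.
 */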
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

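/*
 * Delete the chunk item for @chunk_offset from the chunk tree.  The
 * device extents and the block group that referenced this chunk are
 * torn down separately by the caller (see btrfs_relocate_chunk()).
 */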
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

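/*
 * Remove the copy of a system chunk from the sys_chunk_array embedded
 * in the superblock.  The array is a packed sequence of (disk key,
 * chunk item) pairs, so the entry is dropped by memmove()ing the tail
 * down and shrinking the recorded array size.
 */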
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

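/*
 * Relocate a single chunk: move all extents it holds elsewhere via the
 * block group relocation code, then delete its device extents, its
 * chunk tree item (and superblock array copy for system chunks) and its
 * block group, and finally drop the mapping from the in-memory tree.
 */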
static int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

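/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk, e.g.
 * after a seed device has been sprouted, so that system chunks end up
 * on a writeable device.  -ENOSPC failures are retried once before
 * giving up.
 */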
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

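/*
 * insert_balance_item() / del_balance_item() persist and remove the
 * balance item in the tree root that records the current balance
 * arguments; it is what allows an interrupted balance to be resumed on
 * the next mount (see btrfs_recover_balance()).
 */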
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor <= 0)
		return 0;
	if (factor >= 100)
		return num;

	num *= factor;
	do_div(num, 100);
	return num;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	factor = num_stripes / factor;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}
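/*
 * Illustration: with BTRFS_BALANCE_ARGS_USAGE (usage = 20) and
 * BTRFS_BALANCE_ARGS_DEVID (devid = 2) set in bctl->data, a data chunk
 * is relocated only if it is less than 20% used *and* has a stripe on
 * device 2; a single filter returning 1 is enough to skip the chunk.
 */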

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

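/*
 * Core balance loop.  The first pass (counting == true) only counts the
 * chunks that match the filters so bctl->stat.expected is accurate; the
 * second pass actually relocates them, walking the chunk tree from the
 * highest offset downwards.
 */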
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;

	/* step one make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
		btrfs_release_path(path);
		if (!ret)
			goto loop;

		if (counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
			goto loop;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}
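/*
 * For example, BTRFS_BLOCK_GROUP_RAID1 on its own is a valid reduced
 * profile, while RAID1|RAID10 (two profile bits) or any stray
 * non-profile bit is rejected.  In extended mode "single" must be
 * expressed as BTRFS_AVAIL_ALLOC_BIT_SINGLE rather than 0.
 */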

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	int ret;

	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	BUG_ON(ret);
}

void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
			       struct btrfs_ioctl_balance_args *bargs);

/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int mixed = 0;
	int ret;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			printk(KERN_ERR "btrfs: with mixed groups data and "
			       "metadata balance options must be the same\n");
			ret = -EINVAL;
			goto out;
		}
	}

	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	if (fs_info->fs_devices->num_devices == 1)
		allowed |= BTRFS_BLOCK_GROUP_DUP;
	else if (fs_info->fs_devices->num_devices < 4)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	else
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID10);

	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
	     (bctl->data.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "data profile %llu\n",
		       (unsigned long long)bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
	     (bctl->meta.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "metadata profile %llu\n",
		       (unsigned long long)bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
	     (bctl->sys.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "system profile %llu\n",
		       (unsigned long long)bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	/* allow dup'ed data chunks only in mixed mode */
	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10;
	if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	     (fs_info->avail_system_alloc_bits & allowed) &&
	     !(bctl->sys.target & allowed)) ||
	    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	     (fs_info->avail_metadata_alloc_bits & allowed) &&
	     !(bctl->meta.target & allowed))) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			printk(KERN_INFO "btrfs: force reducing metadata "
			       "integrity\n");
		} else {
			printk(KERN_ERR "btrfs: balance will reduce metadata "
			       "integrity, use force if you want this\n");
			ret = -EINVAL;
			goto out;
		}
	}

	ret = insert_balance_item(fs_info->tree_root, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		set_balance_control(bctl);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	atomic_inc(&fs_info->balance_running);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	atomic_dec(&fs_info->balance_running);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, 0, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		__cancel_balance(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
	else
		kfree(bctl);
	return ret;
}

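/*
 * Kthread that resumes an interrupted balance in the background: it
 * simply re-runs btrfs_balance() with the balance control that
 * btrfs_recover_balance() reconstructed from the balance item.
 */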
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	if (fs_info->balance_ctl) {
		printk(KERN_INFO "btrfs: continuing balance\n");
		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);

	return ret;
}

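/*
 * If a balance control was recovered during mount, kick off
 * balance_kthread() to continue it asynchronously, unless the
 * SKIP_BALANCE mount option asks us not to.
 */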
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	spin_lock(&fs_info->balance_lock);
	if (!fs_info->balance_ctl) {
		spin_unlock(&fs_info->balance_lock);
		return 0;
	}
	spin_unlock(&fs_info->balance_lock);

	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
		printk(KERN_INFO "btrfs: force skipping balance\n");
		return 0;
	}

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	return 0;
}

2897
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
I
Ilya Dryomov 已提交
2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

2915
	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
I
Ilya Dryomov 已提交
2916
	if (ret < 0)
2917
		goto out;
I
Ilya Dryomov 已提交
2918 2919
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
2920 2921 2922 2923 2924 2925 2926
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
I
Ilya Dryomov 已提交
2927 2928 2929 2930 2931
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

2932 2933 2934
	bctl->fs_info = fs_info;
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;
I
Ilya Dryomov 已提交
2935 2936 2937 2938 2939 2940 2941 2942

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

2943 2944
	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);
I
Ilya Dryomov 已提交
2945

2946 2947 2948 2949
	set_balance_control(bctl);

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
I
Ilya Dryomov 已提交
2950 2951
out:
	btrfs_free_path(path);
2952 2953 2954
	return ret;
}

int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (atomic_read(&fs_info->balance_running)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(atomic_read(&fs_info->balance_running));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (atomic_read(&fs_info->balance_running)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);
		mutex_lock(&fs_info->balance_mutex);
	} else {
		/* __cancel_balance needs volume_mutex */
		mutex_unlock(&fs_info->balance_mutex);
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl)
			__cancel_balance(fs_info);

		mutex_unlock(&fs_info->volume_mutex);
	}

	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
3037 3038
	int failed = 0;
	bool retried = false;
3039 3040
	struct extent_buffer *l;
	struct btrfs_key key;
3041
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3042
	u64 old_total = btrfs_super_total_bytes(super_copy);
3043
	u64 old_size = device->total_bytes;
3044 3045
	u64 diff = device->total_bytes - new_size;

Y
Yan Zheng 已提交
3046 3047
	if (new_size >= device->total_bytes)
		return -EINVAL;
3048 3049 3050 3051 3052 3053 3054

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

3055 3056
	lock_chunks(root);

3057
	device->total_bytes = new_size;
3058
	if (device->writeable) {
Y
Yan Zheng 已提交
3059
		device->fs_devices->total_rw_bytes -= diff;
3060 3061 3062 3063
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
3064
	unlock_chunks(root);
3065

3066
again:
3067 3068 3069 3070
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

3071
	do {
3072 3073 3074 3075 3076 3077 3078 3079 3080
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
3081
			btrfs_release_path(path);
3082
			break;
3083 3084 3085 3086 3087 3088
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

3089
		if (key.objectid != device->devid) {
3090
			btrfs_release_path(path);
3091
			break;
3092
		}
3093 3094 3095 3096

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

3097
		if (key.offset + length <= new_size) {
3098
			btrfs_release_path(path);
3099
			break;
3100
		}
3101 3102 3103 3104

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3105
		btrfs_release_path(path);
3106 3107 3108

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
3109
		if (ret && ret != -ENOSPC)
3110
			goto done;
3111 3112
		if (ret == -ENOSPC)
			failed++;
3113
	} while (key.offset-- > 0);
3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
3126 3127 3128
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
3129 3130
		unlock_chunks(root);
		goto done;
3131 3132
	}

3133
	/* Shrinking succeeded, else we would be at "done". */
3134
	trans = btrfs_start_transaction(root, 0);
3135 3136 3137 3138 3139
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153
	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
3154 3155 3156 3157 3158
done:
	btrfs_free_path(path);
	return ret;
}

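/*
 * Append a copy of a SYSTEM chunk item, preceded by its disk key, to
 * the superblock's sys_chunk_array so the chunk tree itself can be read
 * at mount time; fails with -EFBIG once the array is full.
 */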
static int btrfs_add_system_chunk(struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

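/*
 * First half of chunk allocation: pick the RAID parameters implied by
 * @type, collect the largest free hole on every writeable device, sort
 * the candidates by available space, build the stripe map and insert
 * the extent mapping and block group.  The chunk item itself is written
 * later by __finish_chunk_alloc().
 */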
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes_out, u64 *stripe_size_out,
			       u64 start, u64 type)
3206
{
3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies to data has */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	int ndevs;
	int i;
	int j;
3230

3231
	BUG_ON(!alloc_profile_is_valid(type, 0));
3232

3233 3234
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;
3235

3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249
	sub_stripes = 1;
	dev_stripes = 1;
	devs_increment = 1;
	ncopies = 1;
	devs_max = 0;	/* 0 == as many as possible */
	devs_min = 1;

	/*
	 * define the properties of each RAID type.
	 * FIXME: move this to a global table and use it in all RAID
	 * calculation code
	 */
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		dev_stripes = 2;
3250
		ncopies = 2;
3251 3252 3253 3254 3255
		devs_max = 1;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		devs_min = 2;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		devs_increment = 2;
3256
		ncopies = 2;
3257 3258 3259 3260 3261 3262 3263 3264 3265 3266
		devs_max = 2;
		devs_min = 2;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		sub_stripes = 2;
		devs_increment = 2;
		ncopies = 2;
		devs_min = 4;
	} else {
		devs_max = 1;
	}
3267

3268
	if (type & BTRFS_BLOCK_GROUP_DATA) {
3269 3270
		max_stripe_size = 1024 * 1024 * 1024;
		max_chunk_size = 10 * max_stripe_size;
3271
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3272 3273 3274 3275 3276
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
			max_stripe_size = 1024 * 1024 * 1024;
		else
			max_stripe_size = 256 * 1024 * 1024;
3277
		max_chunk_size = max_stripe_size;
3278
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
C
Chris Mason 已提交
3279
		max_stripe_size = 32 * 1024 * 1024;
3280 3281 3282 3283 3284
		max_chunk_size = 2 * max_stripe_size;
	} else {
		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
		       type);
		BUG_ON(1);
3285 3286
	}

Y
Yan Zheng 已提交
3287 3288 3289
	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);
3290

3291 3292 3293 3294
	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;
3295

3296
	cur = fs_devices->alloc_list.next;
3297

3298
	/*
3299 3300
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
3301
	 */
3302 3303 3304 3305 3306
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;
3307

3308
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3309

3310
		cur = cur->next;
3311

3312 3313 3314 3315 3316 3317
		if (!device->writeable) {
			printk(KERN_ERR
			       "btrfs: read-only device in alloc_list\n");
			WARN_ON(1);
			continue;
		}
3318

3319 3320
		if (!device->in_fs_metadata)
			continue;
3321

3322 3323 3324 3325
		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;
3326 3327 3328 3329

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;
3330

3331
		ret = find_free_dev_extent(device,
3332 3333 3334 3335
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;
3336

3337 3338
		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;
3339

3340 3341
		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;
3342

3343 3344 3345 3346 3347 3348
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}
3349

3350 3351 3352 3353 3354
	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);
3355

3356 3357
	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;
3358

3359 3360 3361
	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
3362
	}
3363

3364 3365 3366 3367 3368 3369 3370 3371
	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;
3372

3373
	if (stripe_size * ndevs > max_chunk_size * ncopies) {
3374
		stripe_size = max_chunk_size * ncopies;
3375
		do_div(stripe_size, ndevs);
3376 3377
	}

3378
	do_div(stripe_size, dev_stripes);
3379 3380

	/* align to BTRFS_STRIPE_LEN */
3381 3382
	do_div(stripe_size, BTRFS_STRIPE_LEN);
	stripe_size *= BTRFS_STRIPE_LEN;
3383 3384 3385 3386 3387 3388 3389

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;
3390

3391 3392 3393 3394 3395 3396
	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
3397 3398
		}
	}
Y
Yan Zheng 已提交
3399
	map->sector_size = extent_root->sectorsize;
3400 3401 3402
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
Y
Yan Zheng 已提交
3403 3404
	map->type = type;
	map->sub_stripes = sub_stripes;
3405

Y
Yan Zheng 已提交
3406
	*map_ret = map;
3407
	num_bytes = stripe_size * (num_stripes / ncopies);
3408

3409 3410
	*stripe_size_out = stripe_size;
	*num_bytes_out = num_bytes;
3411

3412
	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3413

3414
	em = alloc_extent_map();
Y
Yan Zheng 已提交
3415
	if (!em) {
3416 3417
		ret = -ENOMEM;
		goto error;
3418
	}
Y
Yan Zheng 已提交
3419 3420
	em->bdev = (struct block_device *)map;
	em->start = start;
3421
	em->len = num_bytes;
Y
Yan Zheng 已提交
3422 3423
	em->block_start = 0;
	em->block_len = em->len;
3424

Y
Yan Zheng 已提交
3425
	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3426
	write_lock(&em_tree->lock);
Y
Yan Zheng 已提交
3427
	ret = add_extent_mapping(em_tree, em);
3428
	write_unlock(&em_tree->lock);
Y
Yan Zheng 已提交
3429
	free_extent_map(em);
3430 3431
	if (ret)
		goto error;
3432

Y
Yan Zheng 已提交
3433 3434
	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3435
				     start, num_bytes);
3436 3437
	if (ret)
		goto error;
3438

3439 3440 3441 3442 3443 3444
	for (i = 0; i < map->num_stripes; ++i) {
		struct btrfs_device *device;
		u64 dev_offset;

		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;
3445 3446

		ret = btrfs_alloc_dev_extent(trans, device,
Y
Yan Zheng 已提交
3447 3448
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3449
				start, dev_offset, stripe_size);
3450 3451 3452 3453
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto error;
		}
Y
Yan Zheng 已提交
3454 3455
	}

3456
	kfree(devices_info);
Y
Yan Zheng 已提交
3457
	return 0;
3458 3459 3460 3461 3462

error:
	kfree(map);
	kfree(devices_info);
	return ret;
Y
Yan Zheng 已提交
3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487
}

static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
3488
		ret = btrfs_update_device(trans, device);
3489 3490
		if (ret)
			goto out_free;
Y
Yan Zheng 已提交
3491 3492 3493
		index++;
	}

3494 3495 3496 3497 3498
	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

Y
Yan Zheng 已提交
3499 3500 3501 3502 3503
	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;
3504

3505 3506 3507
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
Y
Yan Zheng 已提交
3508
		stripe++;
3509 3510 3511
		index++;
	}

Y
Yan Zheng 已提交
3512
	btrfs_set_stack_chunk_length(chunk, chunk_size);
3513
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
Y
Yan Zheng 已提交
3514 3515 3516 3517 3518
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3519
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
Y
Yan Zheng 已提交
3520
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3521

Y
Yan Zheng 已提交
3522 3523 3524
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;
3525

Y
Yan Zheng 已提交
3526
	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3527

3528 3529 3530 3531 3532
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
3533
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
Y
Yan Zheng 已提交
3534
					     item_size);
3535
	}
3536

3537
out_free:
3538
	kfree(chunk);
3539
	return ret;
Y
Yan Zheng 已提交
3540
}
3541

Y
Yan Zheng 已提交
3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570
/*
 * Chunk allocation falls into two parts. The first part does works
 * that make the new allocated chunk useable, but not do any operation
 * that modifies the chunk tree. The second part does the works that
 * require modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
3571 3572
	if (ret)
		return ret;
Y
Yan Zheng 已提交
3573 3574 3575
	return 0;
}

C
Chris Mason 已提交
3576
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
Y
Yan Zheng 已提交
3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3595 3596
	if (ret)
		return ret;
Y
Yan Zheng 已提交
3597 3598

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3599
				fs_info->avail_metadata_alloc_bits;
Y
Yan Zheng 已提交
3600 3601 3602 3603
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
3604 3605
	if (ret)
		return ret;
Y
Yan Zheng 已提交
3606 3607 3608 3609

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3610
				fs_info->avail_system_alloc_bits;
Y
Yan Zheng 已提交
3611 3612 3613 3614 3615
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	if (ret)
		goto abort;

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	if (ret)
		goto abort;

	/*
	 * Modifying the chunk tree needs to allocate new blocks from
	 * both the system block group and the metadata block group, so
	 * we can only do the operations that require modifying the
	 * chunk tree after both block groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	if (ret)
		goto abort;

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	if (ret)
		goto abort;

	return 0;

abort:
	btrfs_abort_transaction(trans, root, ret);
	return ret;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	int stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_bio *bbio = NULL;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
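	/*
	 * Worked example (assuming the common 64KiB stripe_len): for an
	 * offset of 200KiB into the chunk, stripe_nr becomes 3 and
	 * stripe_offset becomes 8KiB, i.e. the block starts 8KiB into
	 * the fourth stripe.
	 */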

	if (rw & REQ_DISCARD)
		*length = min_t(u64, em->len - offset, *length);
	else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!bbio_ret)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
			(~(map->stripe_len - 1));
	do_div(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_index = do_div(stripe_nr, map->num_stripes);
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (rw & REQ_WRITE)
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
			mirror_num = stripe_index - old_stripe_index + 1;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
		mirror_num = stripe_index + 1;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	atomic_set(&bbio->error, 0);

	if (rw & REQ_DISCARD) {
		int factor = 0;
		int sub_stripes = 0;
		u64 stripes_per_dev = 0;
		u32 remaining_stripes = 0;
		u32 last_stripe = 0;

		if (map->type &
		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
				sub_stripes = 1;
			else
				sub_stripes = map->sub_stripes;

			factor = map->num_stripes / sub_stripes;
			stripes_per_dev = div_u64_rem(stripe_nr_end -
						      stripe_nr_orig,
						      factor,
						      &remaining_stripes);
			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
			last_stripe *= sub_stripes;
		}

		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;

				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;

				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
					bbio->stripes[i].length -=
						stripe_end_offset;

				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_RAID10 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			max_errors = 1;
		}
	}

	*bbio_ret = bbio;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;
out:
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
				 mirror_num);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}

static void *merge_stripe_index_into_bio_private(void *bi_private,
						 unsigned int stripe_index)
{
	/*
	 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
	 * at most 1.
	 * The alternative solution (instead of stealing bits from the
	 * pointer) would be to allocate an intermediate structure
	 * that contains the old private pointer plus the stripe_index.
	 */
	BUG_ON((((uintptr_t)bi_private) & 3) != 0);
	BUG_ON(stripe_index > 3);
	return (void *)(((uintptr_t)bi_private) | stripe_index);
}

static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
{
	return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
}

static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
{
	return (unsigned int)((uintptr_t)bi_private) & 3;
}
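
/*
 * Round-trip example for the helpers above: bbio pointers come from
 * kzalloc() and are at least 4-byte aligned, so for stripe_index == 2 the
 * merged private value is simply (bbio | 0x2); the extract helpers mask the
 * low two bits off again to recover the bbio pointer and the index 2.
 */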

static void btrfs_end_bio(struct bio *bio, int err)
{
	struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
	int is_orig_bio = 0;

	if (err) {
		atomic_inc(&bbio->error);
		if (err == -EIO || err == -EREMOTEIO) {
			unsigned int stripe_index =
				extract_stripe_index_from_bio_private(
					bio->bi_private);
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}
		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
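		/*
		 * Note: bi_bdev is (ab)used below to hand the mirror number
		 * back to the original completion callback; the real bdev
		 * is not needed any more once all cloned bios have
		 * completed.
		 */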
		bio->bi_bdev = (struct block_device *)
					(unsigned long)bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(bbio);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
			      mirror_num);
	if (ret) /* -ENOMEM */
		return ret;

	total_devs = bbio->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}

	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	while (dev_nr < total_devs) {
		if (dev_nr < total_devs - 1) {
			bio = bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else {
			bio = first_bio;
		}
		bio->bi_private = bbio;
		bio->bi_private = merge_stripe_index_into_bio_private(
				bio->bi_private, (unsigned int)dev_nr);
		bio->bi_end_io = btrfs_end_bio;
		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
		dev = bbio->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
#ifdef DEBUG
			struct rcu_string *name;

			rcu_read_lock();
			name = rcu_dereference(dev->name);
			pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
				 "(%s id %llu), size=%u\n", rw,
				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
				 name->str, dev->devid, bio->bi_size);
			rcu_read_unlock();
#endif
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				btrfsic_submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * The btrfs_set_buffer_uptodate() call does not properly mark all its pages
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
						   u64 logical, int mirror_num)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int ret;
	u64 map_length = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *device;

	BUG_ON(mirror_num == 0);
	ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
			      mirror_num);
	if (ret) {
		BUG_ON(bbio != NULL);
		return NULL;
	}
	BUG_ON(mirror_num != bbio->mirror_num);
	device = bbio->stripes[mirror_num - 1].dev;
	kfree(bbio);
	return device;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);
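		/*
		 * An on-disk dev_stats item may carry fewer than
		 * BTRFS_DEV_STAT_VALUES_MAX counters (e.g. if it was
		 * written by an older kernel); any counter beyond the
		 * item size is simply reset to zero below.
		 */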

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !device->dev_stats_dirty)
			continue;

		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			device->dev_stats_dirty = 0;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	printk_ratelimited_in_rcu(KERN_ERR
			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev,
					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev,
					       BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, device not found\n");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, not yet valid\n");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}