disk-io.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "compat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "ref-cache.h"
#include "tree-log.h"
#include "free-space-cache.h"

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	struct btrfs_work work;
};

/* These are used to set the lockdep class on the extent buffer locks.
 * The class is set by the readpage_end_io_hook after the buffer has
 * passed csum validation but before the pages are unlocked.
 *
 * The lockdep class is also set by btrfs_init_new_buffer on freshly
 * allocated blocks.
 *
 * The class is based on the level in the tree block, which allows lockdep
 * to know that lower nodes nest inside the locks of higher nodes.
 *
 * We also add a check to make sure the highest level of the tree is
 * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
 * code needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
	/* leaf */
	"btrfs-extent-00",
	"btrfs-extent-01",
	"btrfs-extent-02",
	"btrfs-extent-03",
	"btrfs-extent-04",
	"btrfs-extent-05",
	"btrfs-extent-06",
	"btrfs-extent-07",
	/* highest possible level */
	"btrfs-extent-08",
};
#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t page_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		spin_unlock(&em_tree->lock);
		goto out;
	}
	spin_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (em) {
			ret = 0;
		} else {
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
			ret = -EIO;
		}
	} else if (ret) {
		free_extent_map(em);
		em = NULL;
	}
	spin_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&map_token, &kaddr,
					&map_start, &map_len, KM_USER0);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			printk(KERN_INFO "btrfs: %s checksum verify failed "
			       "on %llu wanted %X found %X level %d\n",
			       root->fs_info->sb->s_id,
			       buf->start, val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
{
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk("parent transid verify failed on %llu wanted %llu found %llu\n",
	       (unsigned long long)eb->start,
	       (unsigned long long)parent_transid,
	       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(io_tree, eb);
out:
	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
		      GFP_NOFS);
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;

	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return ret;

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;
	}
	return -EIO;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	BUG_ON(ret);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		WARN_ON(1);
		goto err;
	}
	if (eb->first_page != page) {
		WARN_ON(1);
		goto err;
	}
	if (!PageUptodate(page)) {
		WARN_ON(1);
		goto err;
	}
	found_level = btrfs_header_level(eb);

	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}

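/*
 * make sure the fsid in a tree block's header matches one of the fsids
 * of the devices in this FS, walking the seed device list as well
 */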
static int check_tree_block_fsid(struct btrfs_root *root,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
			   BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
	lockdep_set_class_and_name(&eb->lock,
			   &btrfs_eb_class[level],
			   btrfs_eb_name[level]);
}
#endif

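/*
 * called when a btree page read completes.  This checks the header bytenr,
 * the fsid and the checksum before the block is trusted
 */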
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;

	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk(KERN_INFO "btrfs bad tree block start %llu %llu\n",
		       (unsigned long long)found_start,
		       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	if (eb->first_page != page) {
		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
		       eb->first_page->index, page->index);
		WARN_ON(1);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root, eb)) {
		printk(KERN_INFO "btrfs bad fsid on block %llu\n",
		       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	btrfs_set_buffer_lockdep_class(eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret)
		ret = -EIO;

	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
err:
	free_extent_buffer(eb);
out:
	return ret;
}

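/*
 * bio end_io callback that punts the final completion work over to the
 * appropriate end_io worker thread
 */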
static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & (1 << BIO_RW)) {
		if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}

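/*
 * wrap a bio's private and end_io so the final processing happens in
 * end_workqueue_fn on one of the worker threads
 */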
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
	return atomic_read(&info->nr_async_bios) >
		btrfs_async_submit_limit(info);
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;

	async = container_of(work, struct  async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;
	async->submit_bio_start(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags);
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct  async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	atomic_dec(&fs_info->nr_async_submits);

	if (atomic_read(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct  async_submit_bio, work);
	kfree(async);
}

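/*
 * queue a bio up on the worker threads so the checksumming and submission
 * happen asynchronously
 */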
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;

	atomic_inc(&fs_info->nr_async_submits);
	btrfs_queue_worker(&fs_info->workers, &async->work);
#if 0
	int limit = btrfs_async_submit_limit(fs_info);
	if (atomic_read(&fs_info->nr_async_submits) > limit) {
		wait_event_timeout(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) < limit),
			   HZ/10);

		wait_event_timeout(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_bios) < limit),
			   HZ/10);
	}
#endif
	while (atomic_read(&fs_info->async_submit_draining) &&
	      atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

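/* checksum every dirty tree block page in this bio before it is written */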
static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	struct btrfs_root *root;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		csum_dirty_buffer(root, bvec->bv_page);
		bio_index++;
		bvec++;
	}
	return 0;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	btree_csum_one_bio(bio);
	return 0;
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	int ret;

	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, 1);
	BUG_ON(ret);

	if (!(rw & (1 << BIO_RW))) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}
	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num, 0,
				   __btree_submit_bio_start,
				   __btree_submit_bio_done);
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct extent_buffer *eb;
	int was_dirty;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (!(current->flags & PF_MEMALLOC)) {
		return extent_write_full_page(tree, page,
					      btree_get_extent, wbc);
	}

	redirty_page_for_writepage(wbc, page);
	eb = btrfs_find_tree_block(root, page_offset(page),
				      PAGE_CACHE_SIZE);
	WARN_ON(!eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
	if (!was_dirty) {
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	free_extent_buffer(eb);

	unlock_page(page);
	return 0;
}

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
		u64 num_dirty;
		unsigned long thresh = 32 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		/* this is a bit racy, but that's ok */
		num_dirty = root->fs_info->dirty_metadata_bytes;
		if (num_dirty < thresh)
			return 0;
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	if (PageWriteback(page) || PageDirty(page))
		return 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;

	ret = try_release_extent_state(map, tree, page, gfp_flags);
	if (!ret)
		return 0;

	ret = try_release_extent_buffer(tree, page);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}

	return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk(KERN_WARNING "btrfs warning page private not zero "
		       "on page %llu\n", (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
	.sync_page	= block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						 u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
	return eb;
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
				      buf->start + buf->len - 1, WB_SYNC_ALL);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
				  buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	int ret;

	io_tree = &BTRFS_I(btree_inode)->io_tree;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

	if (ret == 0)
		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
	else
		WARN_ON(1);
	return buf;

}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (root->fs_info->dirty_metadata_bytes >= buf->len)
				root->fs_info->dirty_metadata_bytes -= buf->len;
			else
				WARN_ON(1);
			spin_unlock(&root->fs_info->delalloc_lock);
		}

		/* ugh, clear_extent_buffer_dirty needs to lock the page */
		btrfs_set_lock_blocking(buf);
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	}
	return 0;
}

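/* initialize the in-memory fields of a btrfs_root before it is used */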
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->ref_tree = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;

	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_inode = 0;
	root->last_inode_alloc = 0;
	root->name = NULL;
	root->in_sysfs = 0;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->orphan_list);
	INIT_LIST_HEAD(&root->dead_list);
	spin_lock_init(&root->node_lock);
	spin_lock_init(&root->list_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	root->log_batch = 0;
	root->log_transid = 0;
	extent_io_tree_init(&root->dirty_log_pages,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);

	btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
	root->ref_tree = &root->ref_tree_struct;

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->defrag_level = 0;
	root->root_key.objectid = objectid;
	root->anon_super.s_root = NULL;
	root->anon_super.s_dev = 0;
	INIT_LIST_HEAD(&root->anon_super.s_list);
	INIT_LIST_HEAD(&root->anon_super.s_instances);
	init_rwsem(&root->anon_super.s_umount);

	return 0;
}

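/*
 * find a root item in the tree root and read the corresponding tree
 * block in off the disk
 */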
static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;
	u64 generation;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	BUG_ON(ret);

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	BUG_ON(!root->node);
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
	u64 start = 0;
	u64 end = 0;
	int ret;

	if (!log_root_tree)
		return 0;

	while (1) {
		ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
				    0, &start, &end, EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_dirty(&log_root_tree->dirty_log_pages,
				   start, end, GFP_NOFS);
	}
	eb = fs_info->log_root_tree->node;

	WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) != 0);

	ret = btrfs_free_reserved_extent(fs_info->tree_root,
				eb->start, eb->len);
	BUG_ON(ret);

	free_extent_buffer(eb);
	kfree(fs_info->log_root_tree);
	fs_info->log_root_tree = NULL;
	return 0;
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
	/*
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */
	root->ref_cows = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, BTRFS_TREE_LOG_OBJECTID,
				      trans->transid, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;
	btrfs_set_header_nritems(root->node, 0);
	btrfs_set_header_level(root->node, 0);
	btrfs_set_header_bytenr(root->node, root->node->start);
	btrfs_set_header_generation(root->node, trans->transid);
	btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);

	write_extent_buffer(root->node, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(root->node),
			    BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	inode_item->generation = cpu_to_le64(1);
	inode_item->size = cpu_to_le64(3);
	inode_item->nlink = cpu_to_le32(1);
	inode_item->nbytes = cpu_to_le64(root->leafsize);
	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

	btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start);
	btrfs_set_root_generation(&log_root->root_item, trans->transid);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	return 0;
}

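/*
 * read a subvolume root based on its location key, without inserting it
 * into the fs_roots radix tree
 */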
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 highest_inode;
	u64 generation;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	l = path->nodes[0];
	read_extent_buffer(l, &root->root_item,
	       btrfs_item_ptr_offset(l, path->slots[0]),
	       sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	BUG_ON(!root->node);
insert:
	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
		root->ref_cows = 1;
		ret = btrfs_find_highest_inode(root, &highest_inode);
		if (ret == 0) {
			root->highest_inode = highest_inode;
			root->last_inode_alloc = highest_inode;
		}
	}
	return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_root *root;

	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
	return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	set_anon_super(&root->anon_super, NULL);

	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_find_dead_roots(fs_info->tree_root,
					    root->root_key.objectid, root);
		BUG_ON(ret);
		btrfs_orphan_cleanup(root);
	}
	return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
{
	struct btrfs_root *root;
	int ret;

	root = btrfs_read_fs_root_no_name(fs_info, location);
	if (!root)
		return NULL;

	if (root->in_sysfs)
		return root;

	ret = btrfs_set_root_name(root, name, namelen);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
#if 0
	ret = btrfs_sysfs_add_root(root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root->name);
		kfree(root);
		return ERR_PTR(ret);
	}
#endif
	root->in_sysfs = 1;
	return root;
}

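/* backing dev congested callback: we're congested if any device under us is */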
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;
#if 0
	if ((bdi_bits & (1 << BDI_write_congested)) &&
	    btrfs_congested_async(info, 0))
		return 1;
#endif
	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct btrfs_device *device;
	struct btrfs_fs_info *info;

	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn)
			bdi->unplug_io_fn(bdi, page);
	}
}

static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct inode *inode;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct address_space *mapping;
	u64 offset;

	/* the generic O_DIRECT read code does this */
	if (1 || !page) {
		__unplug_io_fn(bdi, page);
		return;
	}

	/*
	 * page->mapping may change at any time.  Get a consistent copy
	 * and use that for everything below
	 */
	smp_mb();
	mapping = page->mapping;
	if (!mapping)
		return;

	inode = mapping->host;

	/*
	 * don't do the expensive searching for a small number of
	 * devices
	 */
	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
		__unplug_io_fn(bdi, page);
		return;
	}

	offset = page_offset(page);

	em_tree = &BTRFS_I(inode)->extent_tree;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);
	if (!em) {
		__unplug_io_fn(bdi, page);
		return;
	}

	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		free_extent_map(em);
		__unplug_io_fn(bdi, page);
		return;
	}
	offset = offset - em->start;
	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
			  em->block_start + offset, page);
	free_extent_map(em);
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	bdi_init(bdi);
	bdi->ra_pages	= default_backing_dev_info.ra_pages;
	bdi->state		= 0;
	bdi->capabilities	= default_backing_dev_info.capabilities;
	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
	bdi->unplug_io_data	= info;
	bdi->congested_fn	= btrfs_congested_fn;
	bdi->congested_data	= info;
	return 0;
}

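/*
 * metadata blocks can span pages, so check whether this bio (plus anything
 * already up to date in the io_tree) covers the whole tree block before
 * the checksum is verified
 */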
static int bio_ready_for_csum(struct bio *bio)
{
	u64 length = 0;
	u64 buf_len = 0;
	u64 start = 0;
	struct page *page;
	struct extent_io_tree *io_tree = NULL;
	struct btrfs_fs_info *info = NULL;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
			continue;
		}
		if (!page->private) {
			length += bvec->bv_len;
			continue;
		}
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
		info = BTRFS_I(page->mapping->host)->root->fs_info;
	}
	/* are we fully contained in this bio? */
	if (buf_len <= length)
		return 1;

	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bio reads are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
	    !bio_ready_for_csum(bio)) {
		btrfs_queue_worker(&fs_info->endio_meta_workers,
				   &end_io_wq->work);
		return;
	}
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
	bio_endio(bio, error);
}

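/* background thread that periodically cleans up old, deleted snapshots */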
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;

	do {
		smp_mb();
		if (root->fs_info->closing)
			break;

		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->cleaner_mutex);
		btrfs_clean_old_snapshots(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			smp_mb();
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

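/*
 * background thread that commits the running transaction once it is at
 * least 30 seconds old
 */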
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	unsigned long now;
	unsigned long delay;
	int ret;

	do {
		smp_mb();
		if (root->fs_info->closing)
			break;

		delay = HZ * 30;
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		mutex_lock(&root->fs_info->trans_mutex);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			mutex_unlock(&root->fs_info->trans_mutex);
			goto sleep;
		}

		now = get_seconds();
		if (now < cur->start_time || now - cur->start_time < 30) {
			mutex_unlock(&root->fs_info->trans_mutex);
			delay = HZ * 5;
			goto sleep;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_commit_transaction(trans, root);

sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

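/*
 * read the super block and bring up the tree roots, the btree inode and
 * the worker threads needed to mount the filesystem
 */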
struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
			      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	u64 generation;
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
						GFP_NOFS);
	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	struct btrfs_root *log_tree_root;

	int ret;
	int err = -EINVAL;

	struct btrfs_super_block *disk_super;

	if (!extent_root || !tree_root || !fs_info ||
	    !chunk_root || !dev_root || !csum_root) {
		err = -ENOMEM;
		goto fail;
	}
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
	INIT_LIST_HEAD(&fs_info->ordered_operations);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);
	spin_lock_init(&fs_info->ref_cache_lock);

	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->csum_root = csum_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->throttles, 0);
	atomic_set(&fs_info->throttle_gen, 0);
	fs_info->sb = sb;
	fs_info->max_extent = (u64)-1;
	fs_info->max_inline = 8192 * 1024;
	setup_bdi(fs_info, &fs_info->bdi);
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_extents);
	spin_lock_init(&fs_info->ordered_extent_lock);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);

	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			     fs_info->btree_inode->i_mapping,
			     GFP_NOFS);
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
			     GFP_NOFS);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree.rb_node = NULL;

	extent_io_tree_init(&fs_info->pinned_extents,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->do_barriers = 1;

	INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
	btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
	btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);

	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->drop_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	mutex_init(&fs_info->tree_reloc_mutex);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);


	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (!bh)
		goto fail_iput;

	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
	       sizeof(fs_info->super_for_commit));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_iput;

	ret = btrfs_parse_options(tree_root, options);
	if (ret) {
		err = ret;
		goto fail_iput;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		printk(KERN_ERR "BTRFS: couldn't mount because of "
		       "unsupported optional features (%Lx).\n",
		       features);
		err = -EINVAL;
		goto fail_iput;
	}

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!(sb->s_flags & MS_RDONLY) && features) {
		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
		       "unsupported option features (%Lx).\n",
		       features);
		err = -EINVAL;
		goto fail_iput;
	}

	/*
	 * we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time, and so it
	 * cannot dynamically grow.
	 */
	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size);

	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
			   fs_info->thread_pool_size);

	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size));

	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be send down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;

	fs_info->workers.idle_thresh = 16;
	fs_info->workers.ordered = 1;

	fs_info->delalloc_workers.idle_thresh = 2;
	fs_info->delalloc_workers.ordered = 1;

	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_meta_write_workers,
			   "endio-meta-write", fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size);
1743 1744 1745 1746 1747 1748

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_meta_workers.idle_thresh = 4;

	fs_info->endio_write_workers.idle_thresh = 64;
	fs_info->endio_meta_write_workers.idle_thresh = 64;
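
	/*
	 * most pools start with a single thread and grow on demand up to
	 * thread_pool_size; the endio pools are started at full size because,
	 * per the comment above, they are fed from interrupt context and
	 * cannot grow later.
	 */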

	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->delalloc_workers, 1);
	btrfs_start_workers(&fs_info->fixup_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->endio_meta_workers,
			    fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->endio_meta_write_workers,
			    fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->endio_write_workers,
			    fs_info->thread_pool_size);

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
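	/*
	 * readahead is scaled by the number of devices, with a floor of 4MB
	 * worth of pages (1024 pages with 4K pages).
	 */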

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read the system "
		       "array on %s\n", sb->s_id);
		goto fail_sys_array;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));
	generation = btrfs_super_chunk_root_generation(disk_super);
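
	/*
	 * the sys_chunk_array embedded in the super block carries just enough
	 * chunk mappings to locate the chunk tree itself; the full chunk tree
	 * is read a little further down.
	 */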

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, generation);
	BUG_ON(!chunk_root->node);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
	   BTRFS_UUID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_chunk_tree(chunk_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
		       sb->s_id);
		goto fail_chunk_root;
	}

	btrfs_close_extra_devices(fs_devices);

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));
	generation = btrfs_super_generation(disk_super);

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, generation);
	if (!tree_root->node)
		goto fail_chunk_root;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto fail_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;
	if (ret)
		goto fail_extent_root;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
	if (ret)
		goto fail_extent_root;

	csum_root->track_dirty = 1;

	btrfs_read_block_groups(extent_root);

	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;
	fs_info->data_alloc_profile = (u64)-1;
	fs_info->metadata_alloc_profile = (u64)-1;
	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_csum_root;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

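	/*
	 * a non-zero log root in the super means we went down with a tree
	 * log (from fsync) that still has to be replayed.  Log blocks are
	 * written by the transaction following the last committed one, which
	 * is why the log root is read with generation + 1 below.
	 */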
	if (btrfs_super_log_root(disk_super) != 0) {
		u64 bytenr = btrfs_super_log_root(disk_super);

		if (fs_devices->rw_devices == 0) {
			printk(KERN_WARNING "Btrfs log replay required "
			       "on RO media\n");
			err = -EIO;
			goto fail_trans_kthread;
		}
		blocksize =
		     btrfs_level_size(tree_root,
				      btrfs_super_log_root_level(disk_super));

		log_tree_root = kzalloc(sizeof(struct btrfs_root),
						      GFP_NOFS);

		__setup_root(nodesize, leafsize, sectorsize, stripesize,
			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

		log_tree_root->node = read_tree_block(tree_root, bytenr,
						      blocksize,
						      generation + 1);
		ret = btrfs_recover_log_trees(log_tree_root);
		BUG_ON(ret);

		if (sb->s_flags & MS_RDONLY) {
			ret = btrfs_commit_super(tree_root);
			BUG_ON(ret);
		}
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_reloc_trees(tree_root);
		BUG_ON(ret);
	}

	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (!fs_info->fs_root)
		goto fail_trans_kthread;
	return tree_root;

fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_csum_root:
	free_extent_buffer(csum_root->node);
fail_extent_root:
	free_extent_buffer(extent_root->node);
fail_tree_root:
	free_extent_buffer(tree_root->node);
fail_chunk_root:
	free_extent_buffer(chunk_root->node);
fail_sys_array:
	free_extent_buffer(dev_root->node);
fail_sb_buffer:
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	iput(fs_info->btree_inode);

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
	bdi_destroy(&fs_info->bdi);

fail:
	kfree(extent_root);
	kfree(tree_root);
	kfree(fs_info);
	kfree(chunk_root);
	kfree(dev_root);
	kfree(csum_root);
	return ERR_PTR(err);
}

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	u64 bytenr;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
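	/*
	 * with the usual constants btrfs_sb_offset() puts the primary super
	 * at 64K and the mirror copies at roughly 64M and 256G; the loop
	 * below is currently capped at the primary copy only.
	 */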
	for (i = 0; i < 1; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
			break;
		bh = __bread(bdev, bytenr / 4096, 4096);
		if (!bh)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;
		if (btrfs_super_bytenr(super) != bytenr ||
		    strncmp((char *)(&super->magic), BTRFS_MAGIC,
			    sizeof(super->magic))) {
			brelse(bh);
			continue;
		}

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}
	return latest;
}

static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;
	int last_barrier = 0;
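
	/*
	 * write_all_supers() below calls this twice per device: once with
	 * wait == 0 to queue the super block writes and once with wait == 1
	 * to wait for them and collect any errors.
	 */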

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	/* make sure only the last submit_bh does a barrier */
	if (do_barriers) {
		for (i = 0; i < max_mirrors; i++) {
			bytenr = btrfs_sb_offset(i);
			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
			    device->total_bytes)
				break;
			last_barrier = i;
		}
	}

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		if (wait) {
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			BUG_ON(!bh);
			brelse(bh);
			wait_on_buffer(bh);
			if (buffer_uptodate(bh)) {
				brelse(bh);
				continue;
			}
		} else {
			btrfs_set_super_bytenr(sb, bytenr);

			crc = ~(u32)0;
			crc = btrfs_csum_data(NULL, (char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			set_buffer_uptodate(bh);
			get_bh(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
		}

		if (i == last_barrier && do_barriers && device->barriers) {
			ret = submit_bh(WRITE_BARRIER, bh);
			if (ret == -EOPNOTSUPP) {
				printk("btrfs: disabling barriers on dev %s\n",
				       device->name);
				set_buffer_uptodate(bh);
				device->barriers = 0;
				get_bh(bh);
				lock_buffer(bh);
				ret = submit_bh(WRITE, bh);
			}
		} else {
			ret = submit_bh(WRITE, bh);
		}

		if (!ret && wait) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;
		} else if (ret) {
			errors++;
		}
		if (wait)
			brelse(bh);
	}
	return errors < i ? 0 : -1;
}

int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

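	/*
	 * the commit is treated as successful as long as at least one device
	 * writes its supers cleanly: with N devices we tolerate up to N - 1
	 * failures before the BUG() checks below trigger.
	 */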
	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);

	sb = &root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);
		BUG();
	}

	total_errors = 0;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);
		BUG();
	}
	return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	int ret;

	ret = write_all_supers(root, max_mirrors);
	return ret;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (root->anon_super.s_dev) {
		down_write(&root->anon_super.s_umount);
		kill_anon_super(&root->anon_super);
	}
	if (root->node)
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
	kfree(root->name);
	kfree(root);
	return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
	return 0;
}

int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			root_objectid = gang[i]->root_key.objectid;
			ret = btrfs_find_dead_roots(fs_info->tree_root,
						    root_objectid, gang[i]);
			BUG_ON(ret);
			btrfs_orphan_cleanup(gang[i]);
		}
		root_objectid++;
	}
	return 0;
}

int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_clean_old_snapshots(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	BUG_ON(ret);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);

	ret = write_ctree_super(NULL, root, 0);
	return ret;
}

int close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	kthread_stop(root->fs_info->transaction_kthread);
	kthread_stop(root->fs_info->cleaner_kthread);

	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret =  btrfs_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	if (fs_info->delalloc_bytes) {
		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
		       fs_info->delalloc_bytes);
	}
	if (fs_info->total_ref_cache_size) {
		printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
		       (unsigned long long)fs_info->total_ref_cache_size);
	}

	if (fs_info->extent_root->node)
		free_extent_buffer(fs_info->extent_root->node);

	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);

	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);

	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);

	if (root->fs_info->csum_root->node)
		free_extent_buffer(root->fs_info->csum_root->node);

	btrfs_free_block_groups(root->fs_info);

	del_fs_roots(fs_info);

	iput(fs_info->btree_inode);

	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);

#if 0
	while (!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
				    hashers);
		list_del(&hasher->hashers);
		crypto_free_hash(&fs_info->hash_tfm);
		kfree(hasher);
	}
#endif
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	bdi_destroy(&fs_info->bdi);

	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	kfree(fs_info->csum_root);
	return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
	int ret;
	struct inode *btree_inode = buf->first_page->mapping->host;

	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid);
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;
	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
					  buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;
	int was_dirty;

	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
			(unsigned long long)buf->start,
			(unsigned long long)transid,
			(unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}
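
	/*
	 * dirty_metadata_bytes is a running total of dirty btree data; it is
	 * dropped again in btree_lock_page_hook() once the buffer heads out
	 * to disk.
	 */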
	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					    buf);
	if (!was_dirty) {
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->dirty_metadata_bytes += buf->len;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
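	/*
	 * with thresh at 32MB below, balance_dirty_pages is only poked once
	 * that much dirty btree metadata has accumulated, and pdflush or
	 * memory reclaim callers skip the balancing entirely.
	 */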
	struct extent_io_tree *tree;
	u64 num_dirty;
	u64 start = 0;
	unsigned long thresh = 32 * 1024 * 1024;
	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

	if (current_is_pdflush() || current->flags & PF_MEMALLOC)
		return;

	num_dirty = count_range_bits(tree, &start, (u64)-1,
				     thresh, EXTENT_DIRTY);
	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
	return;
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	int ret;
	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0)
		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
	return ret;
}

int btree_lock_page_hook(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_buffer *eb;
	unsigned long len;
	u64 bytenr = page_offset(page);

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;

	len = page->private >> 2;
	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
	if (!eb)
		goto out;

	btrfs_tree_lock(eb);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);

	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		spin_lock(&root->fs_info->delalloc_lock);
		if (root->fs_info->dirty_metadata_bytes >= eb->len)
			root->fs_info->dirty_metadata_bytes -= eb->len;
		else
			WARN_ON(1);
		spin_unlock(&root->fs_info->delalloc_lock);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
out:
	lock_page(page);
	return 0;
}

static struct extent_io_ops btree_extent_io_ops = {
	.write_cache_pages_lock_hook = btree_lock_page_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};