/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> // for block_sync_page
#include <linux/workqueue.h>
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"

#if 0
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
		printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
		       (unsigned long long)extent_buffer_blocknr(buf),
		       (unsigned long long)btrfs_header_blocknr(buf));
		return 1;
	}
	return 0;
}
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);

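/*
 * end_io_wq holds a completed bio together with its original end_io
 * callback and private data while it waits to be processed by the
 * endio worker threads.
 */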
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

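/*
 * async_submit_bio records everything needed to submit a bio from a
 * worker thread instead of from the caller's context.
 */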
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_hook;
	int rw;
	int mirror_num;
	struct btrfs_work work;
};

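/*
 * extent_map hook for the btree inode: metadata pages all map onto one
 * large extent starting at 0 and backed by the latest bdev in the FS.
 */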
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
				    size_t page_offset, u64 start, u64 len,
				    int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		spin_unlock(&em_tree->lock);
		goto out;
	}
	spin_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		printk("failed to insert %Lu %Lu -> %Lu into tree\n",
		       em->start, em->len, em->block_start);
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (em) {
			printk("after failing, found %Lu %Lu %Lu\n",
			       em->start, em->len, em->block_start);
			ret = 0;
		} else {
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
			if (em) {
				printk("double failure lookup gives us "
				       "%Lu %Lu -> %Lu\n", em->start,
				       em->len, em->block_start);
				free_extent_map(em);
			}
			ret = -EIO;
		}
	} else if (ret) {
		free_extent_map(em);
		em = NULL;
	}
	spin_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}

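/*
 * compute the crc of a tree block and either write it into the first
 * BTRFS_CRC32_SIZE bytes of the header (verify == 0) or compare it
 * against the stored value (verify == 1).
 */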
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	char result[BTRFS_CRC32_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while(len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&map_token, &kaddr,
					&map_start, &map_len, KM_USER0);
		if (err) {
			printk("failed to map extent buffer! %lu\n",
			       offset);
			return 1;
		}
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	btrfs_csum_final(crc, result);

	if (verify) {
		int from_this_trans = 0;

		if (root->fs_info->running_transaction &&
		    btrfs_header_generation(buf) ==
		    root->fs_info->running_transaction->transid)
			from_this_trans = 1;

		/* FIXME, this is not good */
		if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, BTRFS_CRC32_SIZE);

			read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
			printk("btrfs: %s checksum verify failed on %llu "
			       "wanted %X found %X from_this_trans %d "
			       "level %d\n",
			       root->fs_info->sb->s_id,
			       buf->start, val, found, from_this_trans,
			       btrfs_header_level(buf));
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
	}
	return 0;
}

static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
{
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk("parent transid verify failed on %llu wanted %llu found %llu\n",
	       (unsigned long long)eb->start,
	       (unsigned long long)parent_transid,
	       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
out:
	clear_extent_buffer_uptodate(io_tree, eb);
	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
		      GFP_NOFS);
	return ret;

}

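/*
 * read the pages of an extent buffer and verify the transid stored in
 * its header, retrying other mirrors when a copy fails verification.
 */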
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;

	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return ret;

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;
	}
	return -EIO;
}

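/*
 * checksum a dirty tree block before it is written out.  Only the
 * first page of a multi-page block actually fills in the csum.
 */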
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	if (len == 0) {
		WARN_ON(1);
	}
	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	BUG_ON(ret);
	btrfs_clear_buffer_defrag(eb);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
		       start, found_start, len);
		WARN_ON(1);
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		WARN_ON(1);
		goto err;
	}
	if (!PageUptodate(page)) {
		printk("csum not up to date page %lu\n", page->index);
		WARN_ON(1);
		goto err;
	}
	found_level = btrfs_header_level(eb);
	spin_lock(&root->fs_info->hash_lock);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
	spin_unlock(&root->fs_info->hash_lock);
	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}

static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	csum_dirty_buffer(root, page);
	return 0;
}

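/*
 * validate a metadata block after a read completes: check the header
 * bytenr, the fsid and the checksum, returning -EIO on any mismatch.
 */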
int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	if (len == 0) {
		WARN_ON(1);
	}
	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

	btrfs_clear_buffer_defrag(eb);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		ret = -EIO;
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		WARN_ON(1);
		ret = -EIO;
		goto err;
	}
	if (memcmp_extent_buffer(eb, root->fs_info->fsid,
				 (unsigned long)btrfs_header_fsid(eb),
				 BTRFS_FSID_SIZE)) {
		printk("bad fsid on block %Lu\n", eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	ret = csum_tree_block(root, eb, 1);
	if (ret)
		ret = -EIO;

	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
	release_extent_buffer_tail_pages(eb);
err:
	free_extent_buffer(eb);
out:
	return ret;
}

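/*
 * bio end_io callback that hands the finished bio off to the endio
 * workqueue, so checksum verification runs outside interrupt context.
 */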
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
#else
static int end_workqueue_bio(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;
	btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

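/*
 * wrap a bio's end_io so that completion is deferred to the endio
 * worker threads.  metadata is 1 for tree blocks, 0 for data.
 */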
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

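/*
 * worker thread hook: run one queued async submission by calling its
 * submit_bio_hook, then free the async_submit_bio.
 */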
static void run_one_async_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;

	async = container_of(work, struct  async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;
	atomic_dec(&fs_info->nr_async_submits);
	async->submit_bio_hook(async->inode, async->rw, async->bio,
			       async->mirror_num);
	kfree(async);
}

445 446 447 448 449 450 451 452 453 454 455 456 457 458 459
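/*
 * queue a bio so it is submitted by the worker threads instead of the
 * current context.
 */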
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			extent_submit_bio_hook_t *submit_bio_hook)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_hook = submit_bio_hook;
	async->work.func = run_one_async_submit;
	async->work.flags = 0;
	atomic_inc(&fs_info->nr_async_submits);
	btrfs_queue_worker(&fs_info->workers, &async->work);
	return 0;
}

static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 offset;
	int ret;

	offset = bio->bi_sector << 9;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	if (rw & (1 << BIO_RW)) {
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}

	/*
	 * called for a read, do the setup so that checksum validation
	 * can happen in the async kernel threads
	 */
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
	BUG_ON(ret);

	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num)
{
	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	if (!(rw & (1 << BIO_RW))) {
		return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
	}
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   __btree_submit_bio_hook);
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btree_get_extent, wbc);
}

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		u64 num_dirty;
		u64 start = 0;
		unsigned long thresh = 96 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		if (current_is_pdflush()) {
			thresh = 96 * 1024 * 1024;
		} else {
			thresh = 8 * 1024 * 1024;
		}
		num_dirty = count_range_bits(tree, &start, (u64)-1,
					     thresh, EXTENT_DIRTY);
		if (num_dirty < thresh) {
			return 0;
		}
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	if (page_count(page) > 3) {
		/* once for page->private, once for the caller,
		 * once for the page cache
		 */
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_state(map, tree, page, gfp_flags);
	if (ret == 1) {
		invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
	.sync_page	= block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						 u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
	return eb;
}


struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	int ret;

	io_tree = &BTRFS_I(btree_inode)->io_tree;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

	if (ret == 0) {
		buf->flags |= EXTENT_UPTODATE;
	}
	return buf;

}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		WARN_ON(!btrfs_tree_locked(buf));
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	}
	return 0;
}

int wait_on_tree_block_writeback(struct btrfs_root *root,
				 struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
					buf);
	return 0;
}

static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->inode = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;

	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_inode = 0;
	root->last_inode_alloc = 0;
	root->name = NULL;
	root->in_sysfs = 0;

	INIT_LIST_HEAD(&root->dirty_list);
	spin_lock_init(&root->node_lock);
	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->defrag_level = 0;
	root->root_key.objectid = objectid;
	return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	BUG_ON(ret);

	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
	BUG_ON(!root->node);
	return 0;
}

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 highest_inode;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	l = path->nodes[0];
	read_extent_buffer(l, &root->root_item,
	       btrfs_item_ptr_offset(l, path->slots[0]),
	       sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
	BUG_ON(!root->node);
insert:
	root->ref_cows = 1;
	ret = btrfs_find_highest_inode(root, &highest_inode);
	if (ret == 0) {
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
	}
	return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_root *root;

	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
	return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info, location);
	if (IS_ERR(root))
		return root;
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid, root);
	BUG_ON(ret);

	return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
{
	struct btrfs_root *root;
	int ret;

	root = btrfs_read_fs_root_no_name(fs_info, location);
	if (!root)
		return NULL;

	if (root->in_sysfs)
		return root;

	ret = btrfs_set_root_name(root, name, namelen);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}

	ret = btrfs_sysfs_add_root(root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root->name);
		kfree(root);
		return ERR_PTR(ret);
	}
	root->in_sysfs = 1;
	return root;
}
#if 0
static int add_hasher(struct btrfs_fs_info *info, char *type) {
	struct btrfs_hasher *hasher;

	hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
	if (!hasher)
		return -ENOMEM;
	hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
	if (!hasher->hash_tfm) {
		kfree(hasher);
		return -EINVAL;
	}
	spin_lock(&info->hash_lock);
	list_add(&hasher->list, &info->hashers);
	spin_unlock(&info->hash_lock);
	return 0;
}
#endif

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	int limit = 256 * info->fs_devices->open_devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	if ((bdi_bits & (1 << BDI_write_congested)) &&
	    atomic_read(&info->nr_async_submits) > limit) {
		return 1;
	}

	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct list_head *cur;
	struct btrfs_device *device;
	struct btrfs_fs_info *info;

	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn) {
			bdi->unplug_io_fn(bdi, page);
		}
	}
}

void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct inode *inode;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct address_space *mapping;
	u64 offset;

	/* the generic O_DIRECT read code does this */
	if (!page) {
		__unplug_io_fn(bdi, page);
		return;
	}

	/*
	 * page->mapping may change at any time.  Get a consistent copy
	 * and use that for everything below
	 */
	smp_mb();
	mapping = page->mapping;
	if (!mapping)
		return;

	inode = mapping->host;
	offset = page_offset(page);

	em_tree = &BTRFS_I(inode)->extent_tree;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);
	if (!em)
		return;

	offset = offset - em->start;
	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
			  em->block_start + offset, page);
	free_extent_map(em);
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_init(bdi);
#endif
	bdi->ra_pages	= default_backing_dev_info.ra_pages;
	bdi->state		= 0;
	bdi->capabilities	= default_backing_dev_info.capabilities;
	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
	bdi->unplug_io_data	= info;
	bdi->congested_fn	= btrfs_congested_fn;
	bdi->congested_data	= info;
	return 0;
}

static int bio_ready_for_csum(struct bio *bio)
{
	u64 length = 0;
	u64 buf_len = 0;
	u64 start = 0;
	struct page *page;
	struct extent_io_tree *io_tree = NULL;
	struct btrfs_fs_info *info = NULL;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
			continue;
		}
		if (!page->private) {
			length += bvec->bv_len;
			continue;
		}
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
		info = BTRFS_I(page->mapping->host)->root->fs_info;
	}
	/* are we fully contained in this bio? */
	if (buf_len <= length)
		return 1;

	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
	if (ret == 1)
		return ret;
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bios are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
		btrfs_queue_worker(&fs_info->endio_workers,
				   &end_io_wq->work);
		return;
	}
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	bio_endio(bio, bio->bi_size, error);
#else
	bio_endio(bio, error);
#endif
}

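/*
 * open_ctree does the hard work of bringing the filesystem up: read
 * the super block, the sys array and the chunk tree, start the worker
 * threads and set up the tree, extent, chunk and device roots.
 */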
struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
			      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
						GFP_NOFS);
	struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	int ret;
	int err = -EINVAL;

	struct btrfs_super_block *disk_super;

	if (!extent_root || !tree_root || !fs_info) {
		err = -ENOMEM;
		goto fail;
	}
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	spin_lock_init(&fs_info->hash_lock);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);

	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	atomic_set(&fs_info->nr_async_submits, 0);
	fs_info->sb = sb;
	fs_info->max_extent = (u64)-1;
	fs_info->max_inline = 8192 * 1024;
	setup_bdi(fs_info, &fs_info->bdi);
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;
	fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);

	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			     fs_info->btree_inode->i_mapping,
			     GFP_NOFS);
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
			     GFP_NOFS);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	extent_io_tree_init(&fs_info->free_space_cache,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->block_group_cache,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pinned_extents,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pending_del,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->extent_ins,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->do_barriers = 1;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
#else
	INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
#endif
	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->fs_mutex);
	mutex_init(&fs_info->alloc_mutex);
	mutex_init(&fs_info->chunk_mutex);

#if 0
	ret = add_hasher(fs_info, "crc32c");
	if (ret) {
		printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
		err = -ENOMEM;
		goto fail_iput;
	}
#endif
	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);


	bh = __bread(fs_devices->latest_bdev,
		     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh)
		goto fail_iput;

	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_sb_buffer;

	err = btrfs_parse_options(tree_root, options);
	if (err)
		goto fail_sb_buffer;

	/*
	 * we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time, and so it
	 * cannot dynamically grow.
	 */
	btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);


	err = -EINVAL;
	if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
		printk("Btrfs: wanted %llu devices, but found %llu\n",
		       (unsigned long long)btrfs_super_num_devices(disk_super),
		       (unsigned long long)fs_devices->open_devices);
		if (btrfs_test_opt(tree_root, DEGRADED))
			printk("continuing in degraded mode\n");
		else {
			goto fail_sb_buffer;
		}
	}

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->fs_mutex);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk("btrfs: failed to read the system array on %s\n",
		       sb->s_id);
		goto fail_sys_array;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, 0);
	BUG_ON(!chunk_root->node);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	         (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
		 BTRFS_UUID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_chunk_tree(chunk_root);
	mutex_unlock(&fs_info->chunk_mutex);
	BUG_ON(ret);

	btrfs_close_extra_devices(fs_devices);

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));


	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, 0);
	if (!tree_root->node)
		goto fail_sb_buffer;


	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto fail_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;

	if (ret)
		goto fail_extent_root;

	btrfs_read_block_groups(extent_root);

	fs_info->generation = btrfs_super_generation(disk_super) + 1;
	fs_info->data_alloc_profile = (u64)-1;
	fs_info->metadata_alloc_profile = (u64)-1;
	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;

	mutex_unlock(&fs_info->fs_mutex);
	return tree_root;

fail_extent_root:
	free_extent_buffer(extent_root->node);
fail_tree_root:
	free_extent_buffer(tree_root->node);
fail_sys_array:
	mutex_unlock(&fs_info->fs_mutex);
fail_sb_buffer:
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
	iput(fs_info->btree_inode);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	kfree(extent_root);
	kfree(tree_root);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif
	kfree(fs_info);
	return ERR_PTR(err);
}

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

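/*
 * write a copy of the super block to every device, filling in the
 * per-device dev_item and checksum, then wait for all of the writes to
 * finish.  Barriers are used when the device supports them.
 */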
int write_all_supers(struct btrfs_root *root)
{
	struct list_head *cur;
	struct list_head *head = &root->fs_info->fs_devices->devices;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	struct buffer_head *bh;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u32 crc;
	u64 flags;

	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);

	sb = &root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;
	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata)
			continue;

		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);


		crc = ~(u32)0;
		crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);

		bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
			      BTRFS_SUPER_INFO_SIZE);

		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
		dev->pending_io = bh;

		get_bh(bh);
		set_buffer_uptodate(bh);
		lock_buffer(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;

		if (do_barriers && dev->barriers) {
			ret = submit_bh(WRITE_BARRIER, bh);
			if (ret == -EOPNOTSUPP) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				dev->barriers = 0;
				get_bh(bh);
				lock_buffer(bh);
				ret = submit_bh(WRITE, bh);
			}
		} else {
			ret = submit_bh(WRITE, bh);
		}
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}
	total_errors = 0;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata)
			continue;

		BUG_ON(!dev->pending_io);
		bh = dev->pending_io;
		wait_on_buffer(bh);
		if (!buffer_uptodate(dev->pending_io)) {
			if (do_barriers && dev->barriers) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				get_bh(bh);
				lock_buffer(bh);
				dev->barriers = 0;
				ret = submit_bh(WRITE, bh);
				BUG_ON(ret);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh))
					total_errors++;
			} else {
				total_errors++;
			}

		}
		dev->pending_io = NULL;
		brelse(bh);
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}
	return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	int ret;

	ret = write_all_supers(root);
	return ret;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (root->in_sysfs)
		btrfs_sysfs_del_root(root);
	if (root->inode)
		iput(root->inode);
	if (root->node)
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
	if (root->name)
		kfree(root->name);
	kfree(root);
	return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while(1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
	return 0;
}

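/*
 * tear the filesystem down: commit the final transactions, write the
 * supers, free the cached tree roots and stop the worker threads.
 */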
int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	fs_info->closing = 1;
	btrfs_transaction_flush_work(root);
	mutex_lock(&fs_info->fs_mutex);
	btrfs_defrag_dirty_roots(root->fs_info);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	/* run commit again to  drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);

	write_ctree_super(NULL, root);
	mutex_unlock(&fs_info->fs_mutex);

	btrfs_transaction_flush_work(root);

	if (fs_info->delalloc_bytes) {
		printk("btrfs: at unmount delalloc count %Lu\n",
		       fs_info->delalloc_bytes);
	}
	if (fs_info->extent_root->node)
		free_extent_buffer(fs_info->extent_root->node);

	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);

	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);

	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);

	btrfs_free_block_groups(root->fs_info);
	del_fs_roots(fs_info);

	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

	extent_io_tree_empty_lru(&fs_info->free_space_cache);
	extent_io_tree_empty_lru(&fs_info->block_group_cache);
	extent_io_tree_empty_lru(&fs_info->pinned_extents);
	extent_io_tree_empty_lru(&fs_info->pending_del);
	extent_io_tree_empty_lru(&fs_info->extent_ins);
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);

	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->submit_workers);

	iput(fs_info->btree_inode);
#if 0
	while(!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
				    hashers);
		list_del(&hasher->hashers);
		crypto_free_hash(&fs_info->hash_tfm);
		kfree(hasher);
	}
#endif
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif

	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
	int ret;
	struct inode *btree_inode = buf->first_page->mapping->host;

	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid);
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;
	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
					  buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;

	WARN_ON(!btrfs_tree_locked(buf));
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
			(unsigned long long)buf->start,
			transid, root->fs_info->generation);
		WARN_ON(1);
	}
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}

void btrfs_throttle(struct btrfs_root *root)
{
	struct backing_dev_info *bdi;

	bdi = &root->fs_info->bdi;
	if (root->fs_info->throttles && bdi_write_congested(bdi)) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
		congestion_wait(WRITE, HZ/20);
#else
		blk_congestion_wait(WRITE, HZ/20);
#endif
	}
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	struct extent_io_tree *tree;
	u64 num_dirty;
	u64 start = 0;
	unsigned long thresh = 16 * 1024 * 1024;
	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

	if (current_is_pdflush())
		return;

	num_dirty = count_range_bits(tree, &start, (u64)-1,
				     thresh, EXTENT_DIRTY);
	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
	return;
}

void btrfs_set_buffer_defrag(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
			buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
}

void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
			buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
			GFP_NOFS);
}

int btrfs_buffer_defrag(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
		     buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
}

int btrfs_buffer_defrag_done(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
		     buf->start, buf->start + buf->len - 1,
		     EXTENT_DEFRAG_DONE, 0);
}

int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
		     buf->start, buf->start + buf->len - 1,
		     EXTENT_DEFRAG_DONE, GFP_NOFS);
}

int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
		     buf->start, buf->start + buf->len - 1,
		     EXTENT_DEFRAG, GFP_NOFS);
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	int ret;
	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0) {
		buf->flags |= EXTENT_UPTODATE;
	}
	return ret;
}

static struct extent_io_ops btree_extent_io_ops = {
	.writepage_io_hook = btree_writepage_io_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};