#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(leak_lock);

static inline
void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_del(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       state->start, state->end, state->state, state->tree,
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode;
	u64 isize;

	if (!tree->mapping)
		return;

	inode = tree->mapping->host;
	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		printk_ratelimited(KERN_DEBUG
		    "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
				caller, btrfs_ino(inode), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(new, head)	do {} while (0)
#define btrfs_leak_debug_del(entry)	do {} while (0)
#define btrfs_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
	unsigned long bio_flags;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	if (!tree->mapping)
		return NULL;
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;

	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
				     offsetof(struct btrfs_io_bio, bio));
	if (!btrfs_bioset)
		goto free_buffer_cache;

	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_free(btrfs_bioset);
	btrfs_bioset = NULL;

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	extent_state_cache = NULL;
	return -ENOMEM;
}

void extent_io_exit(void)
{
	btrfs_leak_debug_check();

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
	if (btrfs_bioset)
		bioset_free(btrfs_bioset);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->mapping = mapping;
}
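
/*
 * Illustrative sketch (not part of the original file): the btrfs inode
 * setup code initializes its per-inode trees roughly like
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 *
 * A tree may also be created with a NULL mapping when it tracks no
 * pagecache pages; the debug helpers above guard against that case.
 */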

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
	btrfs_leak_debug_add(&state->leak_list, &states);
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->tree);
		btrfs_leak_debug_del(&state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
	if (!ret)
		return prev;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
		        struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}
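
/*
 * Worked example (illustrative only): if the tree holds
 *
 *	[0, 4095]    state = EXTENT_DIRTY
 *	[4096, 8191] state = EXTENT_DIRTY
 *
 * then merge_state() on either one collapses both into a single
 * [0, 8191] EXTENT_DIRTY state and frees the absorbed struct.  States
 * carrying EXTENT_IOBITS or EXTENT_BOUNDARY are never merged, per the
 * check at the top.
 */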

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			unsigned long *bits)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
		       end, start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node,
			   NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}
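
/*
 * Worked example (illustrative only): splitting [0, 8191] at offset
 * 4096 leaves two states carrying the original bits:
 *
 *	prealloc: [0, 4095]
 *	orig:     [4096, 8191]
 *
 * matching the contract in the comment above; the caller holds
 * tree->lock and supplied 'prealloc'.
 */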

static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned long *bits, int wake)
{
	struct extent_state *next;
	unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned long bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
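
/*
 * Caller sketch (illustrative, not lifted from this file): a truncate
 * style user that wants the whole range gone and any sleepers woken
 * might call
 *
 *	clear_extent_bit(tree, start, end, EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, &cached_state, GFP_NOFS);
 *
 * With delete == 1 the function widens 'bits' with ~EXTENT_CTLBITS, so
 * every non-control bit in the range is dropped regardless of which
 * bits were passed in.
 */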

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long *bits)
{
	unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned long bits, unsigned long exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned long bits, u64 * failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask);
}
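
/*
 * Illustrative sketch: the exclusive_bits argument of __set_extent_bit()
 * is what makes extent locking possible.  A hypothetical caller could do
 *
 *	u64 failed_start;
 *	int err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
 *				   EXTENT_LOCKED, &failed_start, NULL,
 *				   GFP_NOFS);
 *
 * and on -EEXIST, failed_start reports where the conflicting state
 * begins; this is exactly how lock_extent_bits() below retries.
 */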


/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned long bits, unsigned long clear_bits,
		       struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
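
/*
 * Illustrative use, following the kernel-doc above (not a call copied
 * from this file): converting a delalloc range to dirty in one pass
 * instead of a separate clear + set:
 *
 *	convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *			   EXTENT_DELALLOC, &cached_state, GFP_NOFS);
 *
 * Each state is updated under a single tree->lock hold, so there is no
 * window in which the range carries neither bit.
 */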

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    unsigned long bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      unsigned long bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, mask);
}

int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
		      struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end use mask to tell
 * us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned long bits, struct extent_state **cached_state)
{
	int err;
	u64 failed_start;
	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, 0, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}
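
/*
 * Typical locking pattern (illustrative sketch):
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached);
 *	... operate on [start, end] ...
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 *
 * try_lock_extent() is the non-blocking variant: it returns 1 on
 * success and 0 if part of the range was already locked, first undoing
 * any partial lock it managed to set.
 */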

int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		account_page_redirty(page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will returned if
 * nothing was found after 'start'
 */
static struct extent_state *
find_first_extent_bit_state(struct extent_io_tree *tree,
			    u64 start, unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned. If found something, return 0.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned long bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	struct rb_node *n;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && state->tree) {
			n = rb_next(&state->rb_node);
			while (n) {
				state = rb_entry(n, struct extent_state,
						 rb_node);
				if (state->state & bits)
					goto got_it;
				n = rb_next(n);
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state(state, cached_state);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
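
/*
 * Illustrative iteration sketch, in the style of callers that flush
 * marked ranges:
 *
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */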

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
C
1619
			pages_locked++;
C
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
STATIC u64 find_lock_delalloc_range(struct inode *inode,
				    struct extent_io_tree *tree,
				    struct page *locked_page, u64 *start,
				    u64 *end, u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return 0;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes)
		delalloc_end = delalloc_start + max_bytes - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			max_bytes = PAGE_CACHE_SIZE;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
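
/*
 * Caller-side sketch (illustrative): the writepage path repeatedly asks
 * for the next delalloc run covering its locked page, roughly
 *
 *	delalloc_start = page_start;
 *	delalloc_end = 0;
 *	while (delalloc_end < page_end) {
 *		if (!find_lock_delalloc_range(inode, tree, page,
 *					      &delalloc_start,
 *					      &delalloc_end, max_bytes))
 *			break;
 *		... run delalloc for the returned range ...
 *		delalloc_start = delalloc_end + 1;
 *	}
 */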

int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 struct page *locked_page,
				 unsigned long clear_bits,
				 unsigned long page_ops)
{
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (page_ops == 0)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (page_ops & PAGE_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (page_ops & PAGE_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (page_ops & PAGE_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (page_ops & PAGE_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (page_ops & PAGE_UNLOCK)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

C
Chris Mason 已提交
1768 1769 1770 1771 1772
/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (WARN_ON(search_end <= cur_start))
		return 0;

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
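
/*
 * Illustrative sketch: totalling dirty bytes from an offset, capped at
 * max_bytes:
 *
 *	u64 off = 0;
 *	u64 dirty = count_range_bits(tree, &off, (u64)-1, (u64)-1,
 *				     EXTENT_DIRTY, 0);
 *
 * With contig == 1 the walk stops at the first gap instead, so only the
 * leading contiguous run is measured.
 */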

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
1870
	node = tree_search(tree, start);
1871
	if (!node) {
1872 1873 1874 1875 1876 1877 1878 1879 1880 1881
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
1882
	spin_unlock(&tree->lock);
1883 1884 1885 1886 1887
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned long bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && cached->tree && cached->start <= start &&
	    cached->end > start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
				int did_repair)
{
	int ret;
	int err = 0;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;

	set_state_private(failure_tree, rec->start, 0);
	ret = clear_extent_bits(failure_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
	if (ret)
		err = ret;

	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_DAMAGED, GFP_NOFS);
	if (ret && !err)
		err = ret;

	kfree(rec);
	return err;
}

/*
 * this bypasses the standard btrfs submit functions deliberately, as
 * the standard behavior is to write all copies in a raid setup. here we only
 * want to write the one bad copy. so we do the mapping for ourselves and issue
 * submit_bio directly.
 * to avoid any synchronization issues, wait for the data after writing, which
 * actually prevents the read that triggered the error from finishing.
 * currently, there can be no more than two copies of every data bit. thus,
 * exactly one rewrite is required.
 */
int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
			u64 length, u64 logical, struct page *page,
			int mirror_num)
{
	struct bio *bio;
	struct btrfs_device *dev;
	u64 map_length = 0;
	u64 sector;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	int ret;

	ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
	BUG_ON(!mirror_num);

	/* we can't repair anything in raid56 yet */
	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
		return 0;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio)
		return -EIO;
	bio->bi_size = 0;
	map_length = length;

	ret = btrfs_map_block(fs_info, WRITE, logical,
			      &map_length, &bbio, mirror_num);
	if (ret) {
		bio_put(bio);
		return -EIO;
	}
	BUG_ON(mirror_num != bbio->mirror_num);
	sector = bbio->stripes[mirror_num-1].physical >> 9;
	bio->bi_sector = sector;
	dev = bbio->stripes[mirror_num-1].dev;
	kfree(bbio);
	if (!dev || !dev->bdev || !dev->writeable) {
		bio_put(bio);
		return -EIO;
	}
	bio->bi_bdev = dev->bdev;
	bio_add_page(bio, page, length, start - page_offset(page));

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
		/* try to remap that extent elsewhere? */
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
		      start, rcu_str_deref(dev->name), sector);

	bio_put(bio);
	return 0;
}

int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
			 int mirror_num)
{
	u64 start = eb->start;
	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
	int ret = 0;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);
		ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
					start, p, mirror_num);
		if (ret)
			break;
		start += PAGE_CACHE_SIZE;
	}

	return ret;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int clean_io_failure(u64 start, struct page *page)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failrec;
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct extent_state *state;
	int num_copies;
	int did_repair = 0;
	int ret;

	private = 0;
	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
				(u64)-1, 1, EXTENT_DIRTY, 0);
	if (!ret)
		return 0;

	ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
				&private_failure);
	if (ret)
		return 0;

	failrec = (struct io_failure_record *)(unsigned long) private_failure;
	BUG_ON(!failrec->this_mirror);

	if (failrec->in_validation) {
		/* there was no real error, just free the record */
		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
			 failrec->start);
		did_repair = 1;
		goto out;
	}
	if (fs_info->sb->s_flags & MS_RDONLY)
		goto out;

	spin_lock(&BTRFS_I(inode)->io_tree.lock);
	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
					    failrec->start,
					    EXTENT_LOCKED);
	spin_unlock(&BTRFS_I(inode)->io_tree.lock);

	if (state && state->start <= failrec->start &&
	    state->end >= failrec->start + failrec->len - 1) {
		num_copies = btrfs_num_copies(fs_info, failrec->logical,
					      failrec->len);
		if (num_copies > 1)  {
			ret = repair_io_failure(fs_info, start, failrec->len,
						failrec->logical, page,
						failrec->failed_mirror);
			did_repair = !ret;
		}
		ret = 0;
	}

out:
	if (!ret)
		ret = free_io_failure(inode, failrec, did_repair);

	return ret;
}

/*
 * this is a generic handler for readpage errors (default
 * readpage_io_failed_hook). if other copies exist, read those and write back
 * good data to the failed position. does not try to remap the failed extent
 * elsewhere, hoping the device will be smart enough to do this as needed
 */

static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
			      struct page *page, u64 start, u64 end,
			      int failed_mirror)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	struct btrfs_io_bio *btrfs_failed_bio;
	struct btrfs_io_bio *btrfs_bio;
	int num_copies;
	int ret;
	int read_mode;
	u64 logical;

	BUG_ON(failed_bio->bi_rw & REQ_WRITE);

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->this_mirror = 0;
		failrec->bio_flags = 0;
		failrec->in_validation = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (!em) {
			read_unlock(&em_tree->lock);
			kfree(failrec);
			return -EIO;
		}

		if (em->start > start || em->start + em->len <= start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);

		if (!em) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&failrec->bio_flags,
						 em->compress_type);
		}
		pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
			 "len=%llu\n", logical, start, failrec->len);
		failrec->logical = logical;
		free_extent_map(em);

		/* set the bits in the private failure tree */
		ret = set_extent_bits(failure_tree, start, end,
					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		if (ret >= 0)
			ret = set_state_private(failure_tree, start,
						(u64)(unsigned long)failrec);
		/* set the bits in the inode's tree */
		if (ret >= 0)
			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
						GFP_NOFS);
		if (ret < 0) {
			kfree(failrec);
			return ret;
		}
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
		pr_debug("bio_readpage_error: (found) logical=%llu, "
			 "start=%llu, len=%llu, validation=%d\n",
			 failrec->logical, failrec->start, failrec->len,
			 failrec->in_validation);
		/*
		 * when data can be on disk more than twice, add to failrec here
		 * (e.g. with a list for failed_mirror) to make
		 * clean_io_failure() clean all those errors at once.
		 */
	}
	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
				      failrec->logical, failrec->len);
	if (num_copies == 1) {
		/*
		 * we only have a single copy of the data, so don't bother with
		 * all the retry and error correction code that follows. no
		 * matter what the error is, it is very likely to persist.
		 */
		pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
			 num_copies, failrec->this_mirror, failed_mirror);
		free_io_failure(inode, failrec, 0);
		return -EIO;
	}

	/*
	 * there are two premises:
	 *	a) deliver good data to the caller
	 *	b) correct the bad sectors on disk
	 */
	if (failed_bio->bi_vcnt > 1) {
		/*
		 * to fulfill b), we need to know the exact failing sectors, as
		 * we don't want to rewrite any more than the failed ones. thus,
		 * we need separate read requests for the failed bio
		 *
		 * if the following BUG_ON triggers, our validation request got
		 * merged. we need separate requests for our algorithm to work.
		 */
		BUG_ON(failrec->in_validation);
		failrec->in_validation = 1;
		failrec->this_mirror = failed_mirror;
		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
	} else {
		/*
		 * we're ready to fulfill a) and b) alongside. get a good copy
		 * of the failed sector and if we succeed, we have setup
		 * everything for repair_io_failure to do the rest for us.
		 */
		if (failrec->in_validation) {
			BUG_ON(failrec->this_mirror != failed_mirror);
			failrec->in_validation = 0;
			failrec->this_mirror = 0;
		}
		failrec->failed_mirror = failed_mirror;
		failrec->this_mirror++;
		if (failrec->this_mirror == failed_mirror)
			failrec->this_mirror++;
		read_mode = READ_SYNC;
	}

	if (failrec->this_mirror > num_copies) {
		pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
			 num_copies, failrec->this_mirror, failed_mirror);
		free_io_failure(inode, failrec, 0);
		return -EIO;
	}

	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		free_io_failure(inode, failrec, 0);
		return -EIO;
	}
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
	bio->bi_size = 0;

	btrfs_failed_bio = btrfs_io_bio(failed_bio);
	if (btrfs_failed_bio->csum) {
		struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
		u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

		btrfs_bio = btrfs_io_bio(bio);
		btrfs_bio->csum = btrfs_bio->csum_inline;
		phy_offset >>= inode->i_sb->s_blocksize_bits;
		phy_offset *= csum_size;
		memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
		       csum_size);
	}

	bio_add_page(bio, page, failrec->len, start - page_offset(page));

	pr_debug("bio_readpage_error: submitting new read[%#x] to "
		 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
		 failrec->this_mirror, num_copies, failrec->in_validation);

	ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
					 failrec->this_mirror,
					 failrec->bio_flags, 0);
	return ret;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
	int uptodate = (err == 0);
	struct extent_io_tree *tree;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (tree->ops && tree->ops->writepage_end_io_hook) {
		ret = tree->ops->writepage_end_io_hook(page, start,
					       end, NULL, uptodate);
		if (ret)
			uptodate = 0;
	}

	if (!uptodate) {
		ClearPageUptodate(page);
		SetPageError(page);
	}
	return 0;
}

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	u64 start;
	u64 end;

	do {
		struct page *page = bvec->bv_page;

		/* We always issue full-page reads, but if some block
		 * in a page fails to read, blk_update_request() will
		 * advance bv_offset and adjust bv_len to compensate.
		 * Print a warning for nonzero offsets, and an error
		 * if they don't add up to a full page.  */
		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
			printk("%s page write in btrfs with offset %u and length %u\n",
			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
			       bvec->bv_offset, bvec->bv_len);

		start = page_offset(page);
		end = start + bvec->bv_offset + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (end_extent_writepage(page, err, start, end))
			continue;

		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

static void
endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
			      int uptodate)
{
	struct extent_state *cached = NULL;
	u64 end = start + len - 1;

	if (uptodate && tree->track_uptodate)
		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
	unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
	struct extent_io_tree *tree;
	u64 offset = 0;
	u64 start;
	u64 end;
	u64 len;
	u64 extent_start = 0;
	u64 extent_len = 0;
	int mirror;
	int ret;

	if (err)
		uptodate = 0;

	do {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;

		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
			 "mirror=%lu\n", (u64)bio->bi_sector, err,
			 io_bio->mirror_num);
		tree = &BTRFS_I(inode)->io_tree;

		/* We always issue full-page reads, but if some block
		 * in a page fails to read, blk_update_request() will
		 * advance bv_offset and adjust bv_len to compensate.
		 * Print a warning for nonzero offsets, and an error
		 * if they don't add up to a full page.  */
		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
			printk("%s page read in btrfs with offset %u and length %u\n",
			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
			       bvec->bv_offset, bvec->bv_len);

		start = page_offset(page);
		end = start + bvec->bv_offset + bvec->bv_len - 1;
		len = bvec->bv_len;

		if (++bvec <= bvec_end)
			prefetchw(&bvec->bv_page->flags);

		mirror = io_bio->mirror_num;
		if (likely(uptodate && tree->ops &&
			   tree->ops->readpage_end_io_hook)) {
			ret = tree->ops->readpage_end_io_hook(io_bio, offset,
							      page, start, end,
							      mirror);
			if (ret)
				uptodate = 0;
			else
				clean_io_failure(start, page);
		}

		if (likely(uptodate))
			goto readpage_ok;

		if (tree->ops && tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(page, mirror);
			if (!ret && !err &&
			    test_bit(BIO_UPTODATE, &bio->bi_flags))
				uptodate = 1;
		} else {
			/*
			 * The generic bio_readpage_error handles errors the
			 * following way: If possible, new read requests are
			 * created and submitted and will end up in
			 * end_bio_extent_readpage as well (if we're lucky, not
			 * in the !uptodate case). In that case it returns 0 and
			 * we just go on with the next page in our bio. If it
			 * can't handle the error it will return -EIO and we
			 * remain responsible for that page.
			 */
			ret = bio_readpage_error(bio, offset, page, start, end,
						 mirror);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				if (err)
					uptodate = 0;
				continue;
			}
		}
readpage_ok:
		if (likely(uptodate)) {
			loff_t i_size = i_size_read(inode);
			pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
			unsigned offset;

			/* Zero out the end if this page straddles i_size */
			offset = i_size & (PAGE_CACHE_SIZE-1);
			if (page->index == end_index && offset)
				zero_user_segment(page, offset, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
		offset += len;

		if (unlikely(!uptodate)) {
			if (extent_len) {
				endio_readpage_release_extent(tree,
							      extent_start,
							      extent_len, 1);
				extent_start = 0;
				extent_len = 0;
			}
			endio_readpage_release_extent(tree, start,
						      end - start + 1, 0);
		} else if (!extent_len) {
			extent_start = start;
			extent_len = end + 1 - start;
		} else if (extent_start + extent_len == start) {
			extent_len += end + 1 - start;
		} else {
			endio_readpage_release_extent(tree, extent_start,
						      extent_len, uptodate);
			extent_start = start;
			extent_len = end + 1 - start;
		}
	} while (bvec <= bvec_end);

	if (extent_len)
		endio_readpage_release_extent(tree, extent_start, extent_len,
					      uptodate);
	if (io_bio->end_io)
		io_bio->end_io(io_bio, err);
	bio_put(bio);
}

/*
 * this allocates from the btrfs_bioset.  We're returning a bio right now
 * but you can call btrfs_io_bio for the appropriate container_of magic
 */
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct btrfs_io_bio *btrfs_bio;
	struct bio *bio;

	bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2)) {
			bio = bio_alloc_bioset(gfp_flags,
					       nr_vecs, btrfs_bioset);
		}
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
		btrfs_bio = btrfs_io_bio(bio);
		btrfs_bio->csum = NULL;
		btrfs_bio->csum_allocated = NULL;
		btrfs_bio->end_io = NULL;
	}
	return bio;
}

struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
}


/* this also allocates from the btrfs_bioset */
struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	struct btrfs_io_bio *btrfs_bio;
	struct bio *bio;

	bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
	if (bio) {
		btrfs_bio = btrfs_io_bio(bio);
		btrfs_bio->csum = NULL;
		btrfs_bio->csum_allocated = NULL;
		btrfs_bio->end_io = NULL;
	}
	return bio;
}


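/*
 * hand a fully built bio to the tree's submit_bio_hook, or straight to the
 * block layer when no hook is set; the extent_io_tree rides in
 * bio->bi_private until this point.
 */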
static int __must_check submit_one_bio(int rw, struct bio *bio,
				       int mirror_num, unsigned long bio_flags)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	u64 start;

	start = page_offset(page) + bvec->bv_offset;

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num, bio_flags, start);
	else
		btrfsic_submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

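/*
 * ask the merge_bio_hook whether 'page' may be added to the bio under
 * construction; a nonzero return tells the caller to submit the current
 * bio and start a new one.
 */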
static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
		     unsigned long offset, size_t size, struct bio *bio,
		     unsigned long bio_flags)
{
	int ret = 0;
	if (tree->ops && tree->ops->merge_bio_hook)
		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
						bio_flags);
	BUG_ON(ret < 0);
	return ret;

}

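/*
 * add a page to the bio cached in *bio_ret, first submitting the old bio
 * when the new page is not contiguous with it, carries different compress
 * flags, or no longer fits; with a NULL bio_ret the bio is submitted
 * right away.
 */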
static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num,
			      unsigned long prev_bio_flags,
			      unsigned long bio_flags)
{
	int ret = 0;
	struct bio *bio;
	int nr;
	int contig = 0;
	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (old_compressed)
			contig = bio->bi_sector == sector;
		else
			contig = bio_end_sector(bio) == sector;

		if (prev_bio_flags != bio_flags || !contig ||
		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
		    bio_add_page(bio, page, page_size, offset) < page_size) {
			ret = submit_one_bio(rw, bio, mirror_num,
					     prev_bio_flags);
			if (ret < 0)
				return ret;
			bio = NULL;
		} else {
			return 0;
		}
	}
	if (this_compressed)
		nr = BIO_MAX_PAGES;
	else
		nr = bio_get_nr_vecs(bdev);

	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio)
		return -ENOMEM;

	bio_add_page(bio, page, page_size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret)
		*bio_ret = bio;
	else
		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);

	return ret;
}

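/*
 * tie a page to its extent_buffer through page->private, taking an extra
 * page cache reference the first time the page is attached.
 */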
static void attach_extent_buffer_page(struct extent_buffer *eb,
				      struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, (unsigned long)eb);
	} else {
		WARN_ON(page->private != (unsigned long)eb);
	}
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}

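/*
 * look up the extent map covering 'start', reusing *em_cached when it
 * still spans the offset; a freshly mapped extent is stashed back in the
 * cache with an extra reference for the next call.
 */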
static struct extent_map *
__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
		 u64 start, u64 len, get_extent_t *get_extent,
		 struct extent_map **em_cached)
{
	struct extent_map *em;

	if (em_cached && *em_cached) {
		em = *em_cached;
		if (em->in_tree && start >= em->start &&
		    start < extent_map_end(em)) {
			atomic_inc(&em->refs);
			return em;
		}

		free_extent_map(em);
		*em_cached = NULL;
	}

	em = get_extent(inode, page, pg_offset, start, len, 0);
	if (em_cached && !IS_ERR_OR_NULL(em)) {
		BUG_ON(*em_cached);
		atomic_inc(&em->refs);
		*em_cached = em;
	}
	return em;
}
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 * XXX JDM: This needs looking at to ensure proper page locking
 */
static int __do_readpage(struct extent_io_tree *tree,
			 struct page *page,
			 get_extent_t *get_extent,
			 struct extent_map **em_cached,
			 struct bio **bio, int mirror_num,
			 unsigned long *bio_flags, int rw)
{
	struct inode *inode = page->mapping->host;
	u64 start = page_offset(page);
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
	size_t pg_offset = 0;
	size_t iosize;
	size_t disk_io_size;
	size_t blocksize = inode->i_sb->s_blocksize;
	unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;

	set_page_extent_mapped(page);

	end = page_end;
	if (!PageUptodate(page)) {
		if (cleancache_get_page(page) == 0) {
			BUG_ON(blocksize != PAGE_SIZE);
			unlock_extent(tree, start, end);
			goto out;
		}
	}

	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
		char *userpage;
		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);

		if (zero_offset) {
			iosize = PAGE_CACHE_SIZE - zero_offset;
			userpage = kmap_atomic(page);
			memset(userpage + zero_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage);
		}
	}
	while (cur <= end) {
		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;

		if (cur >= last_byte) {
			char *userpage;
			struct extent_state *cached = NULL;

			iosize = PAGE_CACHE_SIZE - pg_offset;
			userpage = kmap_atomic(page);
			memset(userpage + pg_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    &cached, GFP_NOFS);
			if (!parent_locked)
				unlock_extent_cached(tree, cur,
						     cur + iosize - 1,
						     &cached, GFP_NOFS);
			break;
		}
		em = __get_extent_map(inode, page, pg_offset, cur,
				      end - cur + 1, get_extent, em_cached);
		if (IS_ERR_OR_NULL(em)) {
			SetPageError(page);
			if (!parent_locked)
				unlock_extent(tree, cur, end);
			break;
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			this_bio_flag |= EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&this_bio_flag,
						 em->compress_type);
		}

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = ALIGN(iosize, blocksize);
		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
			disk_io_size = em->block_len;
			sector = em->block_start >> 9;
		} else {
			sector = (em->block_start + extent_offset) >> 9;
			disk_io_size = iosize;
		}
		bdev = em->bdev;
		block_start = em->block_start;
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			block_start = EXTENT_MAP_HOLE;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			struct extent_state *cached = NULL;

			userpage = kmap_atomic(page);
			memset(userpage + pg_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    &cached, GFP_NOFS);
			unlock_extent_cached(tree, cur, cur + iosize - 1,
					     &cached, GFP_NOFS);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end,
				   EXTENT_UPTODATE, 1, NULL)) {
			check_page_uptodate(tree, page);
			if (!parent_locked)
				unlock_extent(tree, cur, cur + iosize - 1);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* we have an inline extent but it didn't get marked up
		 * to date.  Error out
		 */
		if (block_start == EXTENT_MAP_INLINE) {
			SetPageError(page);
			if (!parent_locked)
				unlock_extent(tree, cur, cur + iosize - 1);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}

		pnr -= page->index;
		ret = submit_extent_page(rw, tree, page,
					 sector, disk_io_size, pg_offset,
					 bdev, bio, pnr,
					 end_bio_extent_readpage, mirror_num,
					 *bio_flags,
					 this_bio_flag);
		if (!ret) {
			nr++;
			*bio_flags = this_bio_flag;
		} else {
			SetPageError(page);
			if (!parent_locked)
				unlock_extent(tree, cur, cur + iosize - 1);
		}
		cur = cur + iosize;
		pg_offset += iosize;
	}
out:
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

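/*
 * read a run of pages that are contiguous in the file: lock the range,
 * wait out any ordered extents overlapping it, then hand every page to
 * __do_readpage.
 */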
static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
					     struct page *pages[], int nr_pages,
					     u64 start, u64 end,
					     get_extent_t *get_extent,
					     struct extent_map **em_cached,
					     struct bio **bio, int mirror_num,
					     unsigned long *bio_flags, int rw)
{
	struct inode *inode;
	struct btrfs_ordered_extent *ordered;
	int index;

	inode = pages[0]->mapping->host;
	while (1) {
		lock_extent(tree, start, end);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered)
			break;
		unlock_extent(tree, start, end);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	for (index = 0; index < nr_pages; index++) {
		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
			      mirror_num, bio_flags, rw);
		page_cache_release(pages[index]);
	}
}

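/*
 * batch the page array into runs with contiguous file offsets and read
 * each run through __do_contiguous_readpages.
 */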
static void __extent_readpages(struct extent_io_tree *tree,
			       struct page *pages[],
			       int nr_pages, get_extent_t *get_extent,
			       struct extent_map **em_cached,
			       struct bio **bio, int mirror_num,
			       unsigned long *bio_flags, int rw)
{
	u64 start = 0;
	u64 end = 0;
	u64 page_start;
	int index;
	int first_index = 0;

	for (index = 0; index < nr_pages; index++) {
		page_start = page_offset(pages[index]);
		if (!end) {
			start = page_start;
			end = start + PAGE_CACHE_SIZE - 1;
			first_index = index;
		} else if (end + 1 == page_start) {
			end += PAGE_CACHE_SIZE;
		} else {
			__do_contiguous_readpages(tree, &pages[first_index],
						  index - first_index, start,
						  end, get_extent, em_cached,
						  bio, mirror_num, bio_flags,
						  rw);
			start = page_start;
			end = start + PAGE_CACHE_SIZE - 1;
			first_index = index;
		}
	}

	if (end)
		__do_contiguous_readpages(tree, &pages[first_index],
					  index - first_index, start,
					  end, get_extent, em_cached, bio,
					  mirror_num, bio_flags, rw);
}

static int __extent_read_full_page(struct extent_io_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio, int mirror_num,
				   unsigned long *bio_flags, int rw)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_ordered_extent *ordered;
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	int ret;

	while (1) {
		lock_extent(tree, start, end);
		ordered = btrfs_lookup_ordered_extent(inode, start);
		if (!ordered)
			break;
		unlock_extent(tree, start, end);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
			    bio_flags, rw);
	return ret;
}

int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			    get_extent_t *get_extent, int mirror_num)
{
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
				      &bio_flags, READ);
	if (bio)
		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
	return ret;
}

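/*
 * like extent_read_full_page, but the caller already holds the extent
 * range locked, which is signalled through EXTENT_BIO_PARENT_LOCKED.
 */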
int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
				 get_extent_t *get_extent, int mirror_num)
{
	struct bio *bio = NULL;
	unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
	int ret;

	ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
				      &bio_flags, READ);
	if (bio)
		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
	return ret;
}

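/*
 * charge nr_written pages against wbc->nr_to_write and, for sweeping
 * (cyclic or whole-file) writeback, push the mapping's writeback_index
 * past the pages just written.
 */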
static noinline void update_nr_written(struct page *page,
				      struct writeback_control *wbc,
				      unsigned long nr_written)
{
	wbc->nr_to_write -= nr_written;
	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
		page->mapping->writeback_index = page->index + nr_written;
}

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_io_tree *tree = epd->tree;
	u64 start = page_offset(page);
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_state *cached_state = NULL;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t pg_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;
	int page_started;
	int compressed;
	int write_flags;
	unsigned long nr_written = 0;
	bool fill_delalloc = true;

	if (wbc->sync_mode == WB_SYNC_ALL)
		write_flags = WRITE_SYNC;
	else
		write_flags = WRITE;

	trace___extent_writepage(page, inode, wbc);

	WARN_ON(!PageLocked(page));

	ClearPageError(page);

	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
	if (page->index > end_index ||
	   (page->index == end_index && !pg_offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		char *userpage;

		userpage = kmap_atomic(page);
		memset(userpage + pg_offset, 0,
		       PAGE_CACHE_SIZE - pg_offset);
		kunmap_atomic(userpage);
		flush_dcache_page(page);
	}
	pg_offset = 0;

	set_page_extent_mapped(page);

	if (!tree->ops || !tree->ops->fill_delalloc)
		fill_delalloc = false;

	delalloc_start = start;
	delalloc_end = 0;
	page_started = 0;
	if (!epd->extent_locked && fill_delalloc) {
		u64 delalloc_to_write = 0;
		/*
		 * make sure the wbc mapping index is at least updated
		 * to this page.
		 */
		update_nr_written(page, wbc, 0);

		while (delalloc_end < page_end) {
			nr_delalloc = find_lock_delalloc_range(inode, tree,
						       page,
						       &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
			if (nr_delalloc == 0) {
				delalloc_start = delalloc_end + 1;
				continue;
			}
			ret = tree->ops->fill_delalloc(inode, page,
						       delalloc_start,
						       delalloc_end,
						       &page_started,
						       &nr_written);
			/* File system has been set read-only */
			if (ret) {
				SetPageError(page);
				goto done;
			}
			/*
			 * delalloc_end is already one less than the total
			 * length, so we don't subtract one from
			 * PAGE_CACHE_SIZE
			 */
			delalloc_to_write += (delalloc_end - delalloc_start +
					      PAGE_CACHE_SIZE) >>
					      PAGE_CACHE_SHIFT;
			delalloc_start = delalloc_end + 1;
		}
		if (wbc->nr_to_write < delalloc_to_write) {
			int thresh = 8192;

			if (delalloc_to_write < thresh * 2)
				thresh = delalloc_to_write;
			wbc->nr_to_write = min_t(u64, delalloc_to_write,
						 thresh);
		}

		/* did the fill delalloc function already unlock and start
		 * the IO?
		 */
		if (page_started) {
			ret = 0;
			/*
			 * we've unlocked the page, so we can't update
			 * the mapping's writeback index, just update
			 * nr_to_write.
			 */
			wbc->nr_to_write -= nr_written;
			goto done_unlocked;
		}
	}
	if (tree->ops && tree->ops->writepage_start_hook) {
		ret = tree->ops->writepage_start_hook(page, start,
						      page_end);
		if (ret) {
			/* Fixup worker will requeue */
			if (ret == -EBUSY)
				wbc->pages_skipped++;
			else
				redirty_page_for_writepage(wbc, page);
			update_nr_written(page, wbc, nr_written);
			unlock_page(page);
			ret = 0;
			goto done_unlocked;
		}
	}

	/*
	 * we don't want to touch the inode after unlocking the page,
	 * so we update the mapping writeback index now
	 */
	update_nr_written(page, wbc, nr_written + 1);

	end = page_end;
	if (last_byte <= start) {
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start,
							 page_end, NULL, 1);
		goto done;
	}

	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			if (tree->ops && tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, cur,
							 page_end, NULL, 1);
			break;
		}
		em = epd->get_extent(inode, page, pg_offset, cur,
				     end - cur + 1, 1);
		if (IS_ERR_OR_NULL(em)) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);
		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		iosize = ALIGN(iosize, blocksize);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		free_extent_map(em);
		em = NULL;

		/*
		 * compressed and inline extents are written through other
		 * paths in the FS
		 */
		if (compressed || block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			/*
			 * end_io notification does not happen here for
			 * compressed extents
			 */
			if (!compressed && tree->ops &&
			    tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, cur,
							 cur + iosize - 1,
							 NULL, 1);
			else if (compressed) {
				/* we don't want to end_page_writeback on
				 * a compressed extent.  this happens
				 * elsewhere
				 */
				nr++;
			}

			cur += iosize;
			pg_offset += iosize;
			continue;
		}
		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
				   EXTENT_DIRTY, 0, NULL)) {
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}

		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
						cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret) {
			SetPageError(page);
		} else {
			unsigned long max_nr = end_index + 1;

			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk(KERN_ERR "btrfs warning page %lu not "
				       "writeback, cur %llu end %llu\n",
				       page->index, cur, end);
			}

			ret = submit_extent_page(write_flags, tree, page,
						 sector, iosize, pg_offset,
						 bdev, &epd->bio, max_nr,
						 end_bio_extent_writepage,
						 0, 0, 0);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		pg_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	unlock_page(page);

done_unlocked:

	/* drop our reference on any cached states */
	free_extent_state(cached_state);
	return 0;
}

static int eb_wait(void *word)
{
	io_schedule();
	return 0;
}

void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
{
	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
		    TASK_UNINTERRUPTIBLE);
}

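/*
 * lock eb for writeback: take the tree write lock (flushing our pending
 * bio if we must block), wait out any writeback already in flight, and
 * transfer the dirty bit to EXTENT_BUFFER_WRITEBACK.  Returns 1 with all
 * of the eb's pages locked when the buffer needs to be written, 0 when
 * there is nothing to do.
 */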
static int lock_extent_buffer_for_io(struct extent_buffer *eb,
				     struct btrfs_fs_info *fs_info,
				     struct extent_page_data *epd)
{
	unsigned long i, num_pages;
	int flush = 0;
	int ret = 0;

	if (!btrfs_try_tree_write_lock(eb)) {
		flush = 1;
		flush_write_bio(epd);
		btrfs_tree_lock(eb);
	}

	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
		btrfs_tree_unlock(eb);
		if (!epd->sync_io)
			return 0;
		if (!flush) {
			flush_write_bio(epd);
			flush = 1;
		}
		while (1) {
			wait_on_extent_buffer_writeback(eb);
			btrfs_tree_lock(eb);
			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
				break;
			btrfs_tree_unlock(eb);
		}
	}

	/*
	 * We need to do this to prevent races in people who check if the eb is
	 * under IO since we can end up having no IO bits set for a short period
	 * of time.
	 */
	spin_lock(&eb->refs_lock);
	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
		spin_unlock(&eb->refs_lock);
		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
				     -eb->len,
				     fs_info->dirty_metadata_batch);
		ret = 1;
	} else {
		spin_unlock(&eb->refs_lock);
	}

	btrfs_tree_unlock(eb);

	if (!ret)
		return ret;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);

		if (!trylock_page(p)) {
			if (!flush) {
				flush_write_bio(epd);
				flush = 1;
			}
			lock_page(p);
		}
	}

	return ret;
}

static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}

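/*
 * writeback completion for extent buffer pages: on error, flag the eb and
 * its pages; once the last outstanding page finishes, clear the eb's
 * writeback state and wake any waiters.
 */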
static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_buffer *eb;
	int done;

	do {
		struct page *page = bvec->bv_page;

		bvec--;
		eb = (struct extent_buffer *)page->private;
		BUG_ON(!eb);
		done = atomic_dec_and_test(&eb->io_pages);

		if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		end_page_writeback(page);

		if (!done)
			continue;

		end_extent_buffer_writeback(eb);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);

}

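/*
 * write all pages of a single extent buffer: each page is switched to
 * writeback and submitted via submit_extent_page; a failed submission
 * flags the eb, accounts for the pages that will never see an end_io and
 * unlocks whatever pages remain.
 */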
static int write_one_eb(struct extent_buffer *eb,
			struct btrfs_fs_info *fs_info,
			struct writeback_control *wbc,
			struct extent_page_data *epd)
{
	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	u64 offset = eb->start;
	unsigned long i, num_pages;
	unsigned long bio_flags = 0;
	int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
	int ret = 0;

	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	atomic_set(&eb->io_pages, num_pages);
	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
		bio_flags = EXTENT_BIO_TREE_LOG;

	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);

		clear_page_dirty_for_io(p);
		set_page_writeback(p);
		ret = submit_extent_page(rw, tree, p, offset >> 9,
					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
					 -1, end_bio_extent_buffer_writepage,
					 0, epd->bio_flags, bio_flags);
		epd->bio_flags = bio_flags;
		if (ret) {
			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
			SetPageError(p);
			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
				end_extent_buffer_writeback(eb);
			ret = -EIO;
			break;
		}
		offset += PAGE_CACHE_SIZE;
		update_nr_written(p, wbc, 1);
		unlock_page(p);
	}

	if (unlikely(ret)) {
		for (; i < num_pages; i++) {
			struct page *p = extent_buffer_page(eb, i);
			unlock_page(p);
		}
	}

	return ret;
}

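/*
 * btree counterpart of write_cache_pages: sweep the btree inode's mapping
 * for dirty pages, map each back to its extent_buffer and write every
 * buffer out exactly once via write_one_eb.
 */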
int btree_write_cache_pages(struct address_space *mapping,
				   struct writeback_control *wbc)
{
	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
	struct extent_buffer *eb, *prev_eb = NULL;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
		.bio_flags = 0,
	};
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		scanned = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	while (!done && !nr_to_write_done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (!PagePrivate(page))
				continue;

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				break;
			}

			spin_lock(&mapping->private_lock);
			if (!PagePrivate(page)) {
				spin_unlock(&mapping->private_lock);
				continue;
			}

			eb = (struct extent_buffer *)page->private;

			/*
			 * Shouldn't happen and normally this would be a BUG_ON
			 * but no sense in crashing the users box for something
			 * we can survive anyway.
			 */
			if (WARN_ON(!eb)) {
				spin_unlock(&mapping->private_lock);
				continue;
			}

			if (eb == prev_eb) {
				spin_unlock(&mapping->private_lock);
				continue;
			}

			ret = atomic_inc_not_zero(&eb->refs);
			spin_unlock(&mapping->private_lock);
			if (!ret)
				continue;

			prev_eb = eb;
			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
			if (!ret) {
				free_extent_buffer(eb);
				continue;
			}

			ret = write_one_eb(eb, fs_info, wbc, &epd);
			if (ret) {
				done = 1;
				free_extent_buffer(eb);
				break;
			}
			free_extent_buffer(eb);

			/*
			 * the filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	flush_write_bio(&epd);
	return ret;
}
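
/*
 * Usage sketch (illustrative, not part of this file): the btree address
 * space wires the function above up as its ->writepages callback, roughly
 * as in fs/btrfs/disk-io.c:
 *
 *	static int btree_writepages(struct address_space *mapping,
 *				    struct writeback_control *wbc)
 *	{
 *		...
 *		return btree_write_cache_pages(mapping, wbc);
 *	}
 */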

/**
 * extent_write_cache_pages - walk the list of dirty pages of the given
 * address space and write all of them.
 * @tree: the extent_io_tree to use
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 * @flush_fn: function called to flush any queued bios before blocking
 *
 * If a page is already under I/O, extent_write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().
 * fsync() and msync() need to guarantee that all the data which was dirty
 * at the time the call was made gets new I/O started against it.  If
 * wbc->sync_mode is WB_SYNC_ALL then we were called for data integrity and
 * we must wait for existing IO to complete.
 */
static int extent_write_cache_pages(struct extent_io_tree *tree,
			     struct address_space *mapping,
			     struct writeback_control *wbc,
			     writepage_t writepage, void *data,
			     void (*flush_fn)(void *))
{
	struct inode *inode = mapping->host;
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int tag;

	/*
	 * We have to hold onto the inode so that ordered extents can do their
	 * work when the IO finishes.  The alternative to this is failing to add
	 * an ordered extent if the igrab() fails there and that is a huge pain
	 * to deal with, so instead just hold onto the inode throughout the
	 * writepages operation.  If it fails here we are freeing up the inode
	 * anyway and we'd rather not waste our time writing out stuff that is
	 * going to be truncated anyway.
	 */
	if (!igrab(inode))
		return 0;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		scanned = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	while (!done && !nr_to_write_done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			if (!trylock_page(page)) {
				flush_fn(data);
				lock_page(page);
			}

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE) {
				if (PageWriteback(page))
					flush_fn(data);
				wait_on_page_writeback(page);
			}

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret)
				done = 1;

			/*
			 * The filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time.
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	btrfs_add_delayed_iput(inode);
	return ret;
}
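
/*
 * Note (illustrative, not part of the original source): @flush_fn exists
 * so that queued bios get submitted before this walk blocks in lock_page()
 * or wait_on_page_writeback() above, avoiding a deadlock against our own
 * unsubmitted IO.  The flush_fn used in this file is simply a wrapper,
 * defined below:
 *
 *	static noinline void flush_write_bio(void *data)
 *	{
 *		struct extent_page_data *epd = data;
 *		flush_epd_write_bio(epd);
 *	}
 */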

static void flush_epd_write_bio(struct extent_page_data *epd)
{
	if (epd->bio) {
		int rw = WRITE;
		int ret;

		if (epd->sync_io)
			rw = WRITE_SYNC;

		ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
		BUG_ON(ret < 0); /* -ENOMEM */
		epd->bio = NULL;
	}
}

static noinline void flush_write_bio(void *data)
{
	struct extent_page_data *epd = data;
	flush_epd_write_bio(epd);
}

int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent,
			  struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
		.bio_flags = 0,
	};

	ret = __extent_writepage(page, wbc, &epd);

	flush_epd_write_bio(&epd);
	return ret;
}
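
/*
 * Usage sketch (illustrative): a ->writepage callback typically forwards
 * here with the inode's io_tree and get_extent hook, roughly as
 * btrfs_writepage() does in fs/btrfs/inode.c:
 *
 *	static int btrfs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_write_full_page(tree, page,
 *					      btrfs_get_extent, wbc);
 *	}
 */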

int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
			      u64 start, u64 end, get_extent_t *get_extent,
			      int mode)
{
	int ret = 0;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 1,
		.sync_io = mode == WB_SYNC_ALL,
		.bio_flags = 0,
	};
	struct writeback_control wbc_writepages = {
		.sync_mode	= mode,
		.nr_to_write	= nr_pages * 2,
		.range_start	= start,
		.range_end	= end + 1,
	};

	while (start <= end) {
		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
		if (clear_page_dirty_for_io(page))
			ret = __extent_writepage(page, &wbc_writepages, &epd);
		else {
			if (tree->ops && tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, start,
						 start + PAGE_CACHE_SIZE - 1,
						 NULL, 1);
			unlock_page(page);
		}
		page_cache_release(page);
		start += PAGE_CACHE_SIZE;
	}

	flush_epd_write_bio(&epd);
	return ret;
}
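
/*
 * Usage note (illustrative): with .extent_locked = 1 the caller promises
 * that [start, end] is already locked in the io tree, so the writepage
 * path won't try to take those locks itself.  The async compression code
 * relies on this when it falls back to plain writeback, along the lines
 * of (call sketched from fs/btrfs/inode.c):
 *
 *	extent_write_locked_range(io_tree, inode, async_extent->start,
 *				  async_extent->start +
 *				  async_extent->ram_size - 1,
 *				  btrfs_get_extent, WB_SYNC_ALL);
 */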

int extent_writepages(struct extent_io_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret = 0;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
		.bio_flags = 0,
	};

	ret = extent_write_cache_pages(tree, mapping, wbc,
				       __extent_writepage, &epd,
				       flush_write_bio);
	flush_epd_write_bio(&epd);
	return ret;
}
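
/*
 * Usage sketch (illustrative): data inodes route ->writepages through
 * this helper, roughly as btrfs_writepages() does in fs/btrfs/inode.c:
 *
 *	static int btrfs_writepages(struct address_space *mapping,
 *				    struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_writepages(tree, mapping,
 *					 btrfs_get_extent, wbc);
 *	}
 */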

int extent_readpages(struct extent_io_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	unsigned long bio_flags = 0;
	struct page *pagepool[16];
	struct page *page;
	struct extent_map *em_cached = NULL;
	int nr = 0;

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_NOFS)) {
			page_cache_release(page);
			continue;
		}

		pagepool[nr++] = page;
		if (nr < ARRAY_SIZE(pagepool))
			continue;
		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
				   &bio, 0, &bio_flags, READ);
		nr = 0;
	}
	if (nr)
		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
				   &bio, 0, &bio_flags, READ);

	if (em_cached)
		free_extent_map(em_cached);

	BUG_ON(!list_empty(pages));
	if (bio)
		return submit_one_bio(READ, bio, 0, bio_flags);
	return 0;
}
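
/*
 * Usage sketch (illustrative): readahead enters here via ->readpages,
 * roughly as btrfs_readpages() does in fs/btrfs/inode.c:
 *
 *	static int btrfs_readpages(struct file *file,
 *				   struct address_space *mapping,
 *				   struct list_head *pages,
 *				   unsigned nr_pages)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_readpages(tree, mapping, pages, nr_pages,
 *					btrfs_get_extent);
 *	}
 *
 * The 16-entry pagepool above batches pages so __extent_readpages() can
 * reuse one cached extent mapping (em_cached) across neighbouring pages.
 */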

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset)
{
	struct extent_state *cached_state = NULL;
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += ALIGN(offset, blocksize);
	if (start > end)
		return 0;

	lock_extent_bits(tree, start, end, 0, &cached_state);
	wait_on_page_writeback(page);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING,
			 1, 1, &cached_state, GFP_NOFS);
	return 0;
}
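
/*
 * Usage note (illustrative): ->invalidatepage implementations call this
 * once any ordered extents for the range have been dealt with; a minimal
 * sketch in the spirit of btree_invalidatepage() in fs/btrfs/disk-io.c:
 *
 *	struct extent_io_tree *tree;
 *	tree = &BTRFS_I(page->mapping->host)->io_tree;
 *	extent_invalidatepage(tree, page, offset);
 *	btree_releasepage(page, GFP_NOFS);
 */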

/*
 * a helper for releasepage, this tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
static int try_release_extent_state(struct extent_map_tree *map,
				    struct extent_io_tree *tree,
				    struct page *page, gfp_t mask)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	int ret = 1;

	if (test_range_bit(tree, start, end,
			   EXTENT_IOBITS, 0, NULL))
		ret = 0;
	else {
		if ((mask & GFP_NOFS) == GFP_NOFS)
			mask = GFP_NOFS;
		/*
		 * at this point we can safely clear everything except the
		 * locked bit and the nodatasum bit
		 */
		ret = clear_extent_bit(tree, start, end,
				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
				 0, 0, NULL, mask);

		/* if clear_extent_bit failed for -ENOMEM reasons,
		 * we can't allow the release to continue.
		 */
		if (ret < 0)
			ret = 0;
		else
			ret = 1;
	}
	return ret;
}

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask)
{
	struct extent_map *em;
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if ((mask & __GFP_WAIT) &&
	    page->mapping->host->i_size > 16 * 1024 * 1024) {
		u64 len;
		while (start <= end) {
			len = end - start + 1;
			write_lock(&map->lock);
			em = lookup_extent_mapping(map, start, len);
			if (!em) {
				write_unlock(&map->lock);
				break;
			}
			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
			    em->start != start) {
				write_unlock(&map->lock);
				free_extent_map(em);
				break;
			}
			if (!test_range_bit(tree, em->start,
					    extent_map_end(em) - 1,
					    EXTENT_LOCKED | EXTENT_WRITEBACK,
					    0, NULL)) {
				remove_extent_mapping(map, em);
				/* once for the rb tree */
				free_extent_map(em);
			}
			start = extent_map_end(em);
			write_unlock(&map->lock);

			/* once for us */
			free_extent_map(em);
		}
	}
	return try_release_extent_state(map, tree, page, mask);
}
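
/*
 * Usage sketch (illustrative): ->releasepage lands here so that clean,
 * unpinned extent maps and their state records are dropped before the VM
 * frees the page, roughly as __btrfs_releasepage() does in
 * fs/btrfs/inode.c:
 *
 *	tree = &BTRFS_I(page->mapping->host)->io_tree;
 *	map = &BTRFS_I(page->mapping->host)->extent_tree;
 *	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
 *	if (ret == 1) {
 *		ClearPagePrivate(page);
 *		set_page_private(page, 0);
 *		page_cache_release(page);
 *	}
 */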

/*
 * helper function for fiemap, which doesn't want to see any holes.
 * This maps until we find something past 'last'
 */
static struct extent_map *get_extent_skip_holes(struct inode *inode,
						u64 offset,
						u64 last,
						get_extent_t *get_extent)
{
	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
	struct extent_map *em;
	u64 len;

	if (offset >= last)
		return NULL;

	while (1) {
		len = last - offset;
		if (len == 0)
			break;
		len = ALIGN(len, sectorsize);
		em = get_extent(inode, NULL, 0, offset, len, 0);
		if (IS_ERR_OR_NULL(em))
			return em;

		/* if this isn't a hole return it */
		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
		    em->block_start != EXTENT_MAP_HOLE) {
			return em;
		}

		/* this is a hole, advance to the next extent */
		offset = extent_map_end(em);
		free_extent_map(em);
		if (offset >= last)
			break;
	}
	return NULL;
}
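
/*
 * Worked example (illustrative): with a 4k sectorsize, an extent at
 * [0k,8k), a hole at [8k,16k) and another extent at [16k,20k), calling
 * get_extent_skip_holes() with offset == 8k first maps the hole, advances
 * offset to extent_map_end(em) == 16k, and returns the mapping that
 * starts at 16k on the second pass.
 */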

static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
{
	unsigned long cnt = *((unsigned long *)ctx);

	cnt++;
	*((unsigned long *)ctx) = cnt;

	/* Now we're sure that the extent is shared. */
	if (cnt > 1)
		return 1;
	return 0;
}

int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len, get_extent_t *get_extent)
{
	int ret = 0;
	u64 off = start;
	u64 max = start + len;
	u32 flags = 0;
	u32 found_type;
	u64 last;
	u64 last_for_get_extent = 0;
	u64 disko = 0;
	u64 isize = i_size_read(inode);
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	int end = 0;
	u64 em_start = 0;
	u64 em_len = 0;
	u64 em_end = 0;

	if (len == 0)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);

	/*
	 * lookup the last file extent.  We're not using i_size here
	 * because there might be preallocation past i_size
	 */
	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
				       path, btrfs_ino(inode), -1, 0);
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}
	WARN_ON(!ret);
	path->slots[0]--;
	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);

	/* No extents, but there might be delalloc bits */
	if (found_key.objectid != btrfs_ino(inode) ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		/* have to trust i_size as the end */
		last = (u64)-1;
		last_for_get_extent = isize;
	} else {
		/*
		 * remember the start of the last extent.  There are a
		 * bunch of different factors that go into the length of the
		 * extent, so it's much less complex to remember where it
		 * started
		 */
		last = found_key.offset;
		last_for_get_extent = last + 1;
	}
	btrfs_release_path(path);

	/*
	 * we might have some extents allocated but more delalloc past those
	 * extents.  so, we trust isize unless the start of the last extent is
	 * beyond isize
	 */
	if (last < isize) {
		last = (u64)-1;
		last_for_get_extent = isize;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
			 &cached_state);

	em = get_extent_skip_holes(inode, start, last_for_get_extent,
				   get_extent);
	if (!em)
		goto out;
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	while (!end) {
		u64 offset_in_extent = 0;

		/* break if the extent we found is outside the range */
		if (em->start >= max || extent_map_end(em) < off)
			break;

		/*
		 * get_extent may return an extent that starts before our
		 * requested range.  We have to make sure the ranges
		 * we return to fiemap always move forward and don't
		 * overlap, so adjust the offsets here
		 */
		em_start = max(em->start, off);

		/*
		 * record the offset from the start of the extent
		 * for adjusting the disk offset below.  Only do this if the
		 * extent isn't compressed since our in ram offset may be past
		 * what we have actually allocated on disk.
		 */
		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			offset_in_extent = em_start - em->start;
		em_end = extent_map_end(em);
		em_len = em_end - em_start;
		disko = 0;
		flags = 0;

		/*
		 * bump off for our next call to get_extent
		 */
		off = extent_map_end(em);
		if (off >= max)
			end = 1;

		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
			end = 1;
			flags |= FIEMAP_EXTENT_LAST;
		} else if (em->block_start == EXTENT_MAP_INLINE) {
			flags |= (FIEMAP_EXTENT_DATA_INLINE |
				  FIEMAP_EXTENT_NOT_ALIGNED);
		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		} else {
			unsigned long ref_cnt = 0;

			disko = em->block_start + offset_in_extent;

			/*
			 * As btrfs supports shared space, this information
			 * can be exported to userspace tools via
			 * flag FIEMAP_EXTENT_SHARED.
			 */
			ret = iterate_inodes_from_logical(
					em->block_start,
					BTRFS_I(inode)->root->fs_info,
					path, count_ext_ref, &ref_cnt);
			if (ret < 0 && ret != -ENOENT)
				goto out_free;

			if (ref_cnt > 1)
				flags |= FIEMAP_EXTENT_SHARED;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			flags |= FIEMAP_EXTENT_ENCODED;

		free_extent_map(em);
		em = NULL;
		if ((em_start >= last) || em_len == (u64)-1 ||
		   (last == (u64)-1 && isize <= em_end)) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}

		/* now scan forward to see if this is really the last extent. */
		em = get_extent_skip_holes(inode, off, last_for_get_extent,
					   get_extent);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		if (!em) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}
		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
					      em_len, flags);
		if (ret)
			goto out_free;
	}
out_free:
	free_extent_map(em);
out:
	btrfs_free_path(path);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}
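
/*
 * Usage sketch (illustrative, userspace side): extent_fiemap() backs the
 * FS_IOC_FIEMAP ioctl.  A hypothetical caller mapping the first 1MiB of
 * an open file fd might do:
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   32 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = 1024 * 1024;
 *	fm->fm_extent_count = 32;
 *	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
 *		for (i = 0; i < fm->fm_mapped_extents; i++)
 *			printf("%llu -> %llu flags 0x%x\n",
 *			       fm->fm_extents[i].fe_logical,
 *			       fm->fm_extents[i].fe_physical,
 *			       fm->fm_extents[i].fe_flags);
 */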

static void __free_extent_buffer(struct extent_buffer *eb)
{
	btrfs_leak_debug_del(&eb->leak_list);
	kmem_cache_free(extent_buffer_cache, eb);
}

static int extent_buffer_under_io(struct extent_buffer *eb)
{
	return (atomic_read(&eb->io_pages) ||
		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}

/*
 * Helper for releasing extent buffer page.
 */
static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
						unsigned long start_idx)
{
	unsigned long index;
	unsigned long num_pages;
	struct page *page;
	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);

	BUG_ON(extent_buffer_under_io(eb));

	num_pages = num_extent_pages(eb->start, eb->len);
	index = start_idx + num_pages;
	if (start_idx >= index)
		return;

	do {
		index--;
		page = extent_buffer_page(eb, index);
		if (page && mapped) {
			spin_lock(&page->mapping->private_lock);
			/*
			 * We do this since we'll remove the pages after we've
			 * removed the eb from the radix tree, so we could race
			 * and have this page now attached to the new eb.  So
			 * only clear page_private if it's still connected to
			 * this eb.
			 */
			if (PagePrivate(page) &&
			    page->private == (unsigned long)eb) {
				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
				BUG_ON(PageDirty(page));
				BUG_ON(PageWriteback(page));
				/*
				 * We need to make sure we haven't been attached
				 * to a new eb.
				 */
				ClearPagePrivate(page);
				set_page_private(page, 0);
				/* One for the page private */
				page_cache_release(page);
			}
			spin_unlock(&page->mapping->private_lock);

		}
		if (page) {
			/* One for when we allocated the page */
			page_cache_release(page);
		}
	} while (index != start_idx);
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
	btrfs_release_extent_buffer_page(eb, 0);
	__free_extent_buffer(eb);
}

static struct extent_buffer *
__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
		      unsigned long len, gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (eb == NULL)
		return NULL;
	eb->start = start;
	eb->len = len;
	eb->fs_info = fs_info;
	eb->bflags = 0;
	rwlock_init(&eb->lock);
	atomic_set(&eb->write_locks, 0);
	atomic_set(&eb->read_locks, 0);
	atomic_set(&eb->blocking_readers, 0);
	atomic_set(&eb->blocking_writers, 0);
	atomic_set(&eb->spinning_readers, 0);
	atomic_set(&eb->spinning_writers, 0);
	eb->lock_nested = 0;
	init_waitqueue_head(&eb->write_lock_wq);
	init_waitqueue_head(&eb->read_lock_wq);

	btrfs_leak_debug_add(&eb->leak_list, &buffers);

	spin_lock_init(&eb->refs_lock);
	atomic_set(&eb->refs, 1);
	atomic_set(&eb->io_pages, 0);

	/*
	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
	 */
	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
		> MAX_INLINE_EXTENT_BUFFER_SIZE);
	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);

	return eb;
}

struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	unsigned long i;
	struct page *p;
	struct extent_buffer *new;
	unsigned long num_pages = num_extent_pages(src->start, src->len);

	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
	if (new == NULL)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		p = alloc_page(GFP_NOFS);
		if (!p) {
			btrfs_release_extent_buffer(new);
			return NULL;
		}
		attach_extent_buffer_page(new, p);
		WARN_ON(PageDirty(p));
		SetPageUptodate(p);
		new->pages[i] = p;
	}

	copy_extent_buffer(new, src, 0, 0, src->len);
	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);

	return new;
}

struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
{
	struct extent_buffer *eb;
	unsigned long num_pages = num_extent_pages(0, len);
	unsigned long i;

	eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
	if (!eb)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		eb->pages[i] = alloc_page(GFP_NOFS);
		if (!eb->pages[i])
			goto err;
	}
	set_extent_buffer_uptodate(eb);
	btrfs_set_header_nritems(eb, 0);
	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);

	return eb;
err:
	for (; i > 0; i--)
		__free_page(eb->pages[i - 1]);
	__free_extent_buffer(eb);
	return NULL;
}

static void check_buffer_tree_ref(struct extent_buffer *eb)
{
	int refs;
	/* the ref bit is tricky.  We have to make sure it is set
	 * if we have the buffer dirty.   Otherwise the
	 * code to free a buffer can end up dropping a dirty
	 * page
	 *
	 * Once the ref bit is set, it won't go away while the
	 * buffer is dirty or in writeback, and it also won't
	 * go away while we have the reference count on the
	 * eb bumped.
	 *
	 * We can't just set the ref bit without bumping the
	 * ref on the eb because free_extent_buffer might
	 * see the ref bit and try to clear it.  If this happens
	 * free_extent_buffer might end up dropping our original
	 * ref by mistake and freeing the page before we are able
	 * to add one more ref.
	 *
	 * So bump the ref count first, then set the bit.  If someone
	 * beat us to it, drop the ref we added.
	 */
	refs = atomic_read(&eb->refs);
	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		return;

	spin_lock(&eb->refs_lock);
	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_inc(&eb->refs);
	spin_unlock(&eb->refs_lock);
}
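
/*
 * Illustrative interleaving (not part of the original source) of the race
 * the comment above describes, had the bit been set before the ref:
 *
 *	this thread			freeing thread
 *	-----------			--------------
 *	set TREE_REF
 *					free_extent_buffer() sees TREE_REF,
 *					clears it and drops the tree's ref
 *	atomic_inc(&eb->refs)		too late, eb may already be freed
 *
 * Bumping refs first closes the window, and the refs >= 2 fast path above
 * skips the lock entirely once the bit is known to be stable.
 */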

static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
	unsigned long num_pages, i;

	check_buffer_tree_ref(eb);

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);
		mark_page_accessed(p);
	}
}

struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&fs_info->buffer_radix,
			       start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_extent_buffer_accessed(eb);
		return eb;
	}
	rcu_read_unlock();

	return NULL;
}

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start, unsigned long len)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	int uptodate = 1;
	int ret;

	eb = find_extent_buffer(fs_info, start);
	if (eb)
		return eb;

	eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
	if (!eb)
		return NULL;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, GFP_NOFS);
		if (!p)
			goto free_eb;

		spin_lock(&mapping->private_lock);
		if (PagePrivate(p)) {
			/*
			 * We could have already allocated an eb for this page
			 * and attached one so let's see if we can get a ref on
			 * the existing eb, and if we can we know it's good and
			 * we can just return that one, else we know we can just
			 * overwrite page->private.
			 */
			exists = (struct extent_buffer *)p->private;
			if (atomic_inc_not_zero(&exists->refs)) {
				spin_unlock(&mapping->private_lock);
				unlock_page(p);
				page_cache_release(p);
				mark_extent_buffer_accessed(exists);
				goto free_eb;
			}

			/*
			 * Do this so attach doesn't complain and we need to
			 * drop the ref the old guy had.
			 */
			ClearPagePrivate(p);
			WARN_ON(PageDirty(p));
			page_cache_release(p);
		}
		attach_extent_buffer_page(eb, p);
		spin_unlock(&mapping->private_lock);
		WARN_ON(PageDirty(p));
		mark_page_accessed(p);
		eb->pages[i] = p;
		if (!PageUptodate(p))
			uptodate = 0;

		/*
		 * see below about how we avoid a nasty race with release page
		 * and why we unlock later
		 */
	}
	if (uptodate)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
again:
	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto free_eb;

	spin_lock(&fs_info->buffer_lock);
	ret = radix_tree_insert(&fs_info->buffer_radix,
				start >> PAGE_CACHE_SHIFT, eb);
	spin_unlock(&fs_info->buffer_lock);
	radix_tree_preload_end();
	if (ret == -EEXIST) {
		exists = find_extent_buffer(fs_info, start);
		if (exists)
			goto free_eb;
		else
			goto again;
	}
	/* add one reference for the tree */
	check_buffer_tree_ref(eb);
	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);

	/*
	 * there is a race where release page may have
	 * tried to find this extent buffer in the radix
	 * but failed.  It will tell the VM it is safe to
	 * reclaim the page, and it will clear the page private bit.
	 * We must make sure to set the page private bit properly
	 * after the extent buffer is in the radix tree so
	 * it doesn't get lost
	 */
	SetPageChecked(eb->pages[0]);
	for (i = 1; i < num_pages; i++) {
		p = extent_buffer_page(eb, i);
		ClearPageChecked(p);
		unlock_page(p);
	}
	unlock_page(eb->pages[0]);
	return eb;

free_eb:
	for (i = 0; i < num_pages; i++) {
		if (eb->pages[i])
			unlock_page(eb->pages[i]);
	}

	WARN_ON(!atomic_dec_and_test(&eb->refs));
	btrfs_release_extent_buffer(eb);
	return exists;
}
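
/*
 * Usage sketch (illustrative): metadata reads pair this with
 * read_extent_buffer_pages(), roughly as read_tree_block() does in
 * fs/btrfs/disk-io.c:
 *
 *	eb = btrfs_find_create_tree_block(root, bytenr, blocksize);
 *	if (!eb)
 *		return NULL;
 *	ret = btree_read_extent_buffer_pages(root, eb, 0, parent_transid);
 *
 * where btrfs_find_create_tree_block() is a thin wrapper that calls
 * alloc_extent_buffer() on the fs_info's btree inode mapping.
 */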

static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	__free_extent_buffer(eb);
}

/* Expects to have eb->refs_lock already held */
static int release_extent_buffer(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->refs) == 0);
	if (atomic_dec_and_test(&eb->refs)) {
		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
			struct btrfs_fs_info *fs_info = eb->fs_info;

			spin_unlock(&eb->refs_lock);

			spin_lock(&fs_info->buffer_lock);
			radix_tree_delete(&fs_info->buffer_radix,
					  eb->start >> PAGE_CACHE_SHIFT);
			spin_unlock(&fs_info->buffer_lock);
		} else {
			spin_unlock(&eb->refs_lock);
		}

		/* Should be safe to release our pages at this point */
		btrfs_release_extent_buffer_page(eb, 0);
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
		return 1;
	}
	spin_unlock(&eb->refs_lock);

	return 0;
}

void free_extent_buffer(struct extent_buffer *eb)
{
	int refs;
	int old;
	if (!eb)
		return;

	while (1) {
		refs = atomic_read(&eb->refs);
		if (refs <= 3)
			break;
		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
		if (old == refs)
			return;
	}

	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
		atomic_dec(&eb->refs);

	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
	    !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);

	/*
	 * I know this is terrible, but it's temporary until we stop tracking
	 * the uptodate bits and such for the extent buffers.
	 */
	release_extent_buffer(eb);
}
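
/*
 * Note (illustrative, not part of the original source): the cmpxchg loop
 * above lets callers holding obviously-extra references (refs > 3) drop
 * one without taking refs_lock; only the last few references fall through
 * to the locked path that may actually free the buffer.  A typical
 * lifetime is:
 *
 *	eb = alloc_extent_buffer(fs_info, start, len);	// refs == 2:
 *	...					// one tree ref, one for us
 *	free_extent_buffer(eb);			// drop our ref
 */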

void free_extent_buffer_stale(struct extent_buffer *eb)
{
	if (!eb)
		return;

	spin_lock(&eb->refs_lock);
	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);

	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);
	release_extent_buffer(eb);
}

void clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageDirty(page))
			continue;

		lock_page(page);
		WARN_ON(!PagePrivate(page));

		clear_page_dirty_for_io(page);
		spin_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&page->mapping->tree_lock);
		ClearPageError(page);
		unlock_page(page);
	}
	WARN_ON(atomic_read(&eb->refs) == 0);
}

int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	int was_dirty = 0;

	check_buffer_tree_ref(eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);

	num_pages = num_extent_pages(eb->start, eb->len);
	WARN_ON(atomic_read(&eb->refs) == 0);
	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));

	for (i = 0; i < num_pages; i++)
		set_page_dirty(extent_buffer_page(eb, i));
	return was_dirty;

4783
int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4784 4785 4786 4787 4788
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

4789
	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4790
	num_pages = num_extent_pages(eb->start, eb->len);
4791 4792
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
C
Chris Mason 已提交
4793 4794
		if (page)
			ClearPageUptodate(page);
4795 4796 4797 4798
	}
	return 0;
}

int set_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		SetPageUptodate(page);
	}
	return 0;
}

int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	unsigned long num_pages;
	unsigned long num_reads = 0;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (wait == WAIT_NONE) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page)) {
			num_reads++;
			all_uptodate = 0;
		}
	}
	if (all_uptodate) {
		if (start_i == 0)
			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		goto unlock_exit;
	}

	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = 0;
	atomic_set(&eb->io_pages, num_reads);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags,
						      READ | REQ_META);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (bio) {
		err = submit_one_bio(READ | REQ_META, bio, mirror_num,
				     bio_flags);
		if (err)
			return err;
	}

	if (ret || wait != WAIT_COMPLETE)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}

	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
4907 4908 4909 4910 4911 4912
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = page_address(page);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			       unsigned long min_len, char **map,
			       unsigned long *map_start,
			       unsigned long *map_len)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
		       "wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		return -EINVAL;
	}

	p = extent_buffer_page(eb, i);
	kaddr = page_address(p);
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			  unsigned long start,
			  unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, c, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		(PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
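
/*
 * Usage sketch (illustrative): these accessors are the primitives behind
 * the btrfs_set_* and key helpers.  A hypothetical caller copying an
 * inline name out of a leaf (name_off and name_len are made up here)
 * would do:
 *
 *	char name[BTRFS_NAME_LEN];
 *	read_extent_buffer(leaf, name, name_off, name_len);
 *
 * with write_extent_buffer() for the reverse direction.  The loops above
 * go page by page because an extent buffer's bytes may span several
 * discontiguous pages.
 */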

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;
	int must_memmove = 0;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		if (areas_overlap(src_off, dst_off, len))
			must_memmove = 1;
	}

	if (must_memmove)
		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
	else
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}
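
/*
 * Worked example (illustrative): inside one page, dst_off = 0, src_off = 2
 * and len = 4 give areas [0,4) and [2,6); the distance 2 is < len 4, so
 * areas_overlap() is true and memmove() is used.  With src_off = 8 the
 * distance is 8 >= 4 and the cheaper memcpy() is safe.  Overlap can only
 * happen when dst_page == src_page, which is why the different-page
 * branch never sets must_memmove.
 */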

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			(PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			(PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			(PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			(PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

int try_release_extent_buffer(struct page *page)
{
	struct extent_buffer *eb;

	/*
	 * We need to make sure nobody is attaching this page to an eb right
	 * now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}