// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"
#include "compression.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add(spinlock_t *lock,
					struct list_head *new,
					struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(lock, flags);
}

static inline void btrfs_leak_debug_del(spinlock_t *lock,
					struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	WARN_ON(!list_empty(&fs_info->allocated_ebs));
	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	u64 isize;

	if (!inode || !is_data_inode(inode))
		return;

	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
#define btrfs_extent_state_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

/*
 * Structure to record info about the bio being assembled, and other info like
 * how many bytes remain before the stripe/ordered extent boundary.
 */
struct btrfs_bio_ctrl {
	struct bio *bio;
	enum btrfs_compression_type compress_type;
	u32 len_to_stripe_boundary;
	u32 len_to_oe_boundary;
};

struct extent_page_data {
	struct btrfs_bio_ctrl bio_ctrl;
	/*
	 * Tells writepage not to lock the state bits for this range; it still
	 * does the unlocking.
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use REQ_SYNC */
	unsigned int sync_io:1;
};

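/*
 * Record a pending bit change in @changeset: account the number of bytes
 * affected and remember the exact range, unless the bits are already in the
 * desired state or no changeset was supplied.
 */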
static int add_extent_changeset(struct extent_state *state, u32 bits,
				 struct extent_changeset *changeset,
				 int set)
{
	int ret;

	if (!changeset)
		return 0;
	if (set && (state->state & bits) == bits)
		return 0;
	if (!set && (state->state & bits) == 0)
		return 0;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	return ret;
}

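/*
 * Hand a fully built bio to the appropriate btrfs submission hook: metadata
 * bios, data writes and data reads each take a different path.
 */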
static void submit_one_bio(struct bio *bio, int mirror_num,
			   enum btrfs_compression_type compress_type)
{
	struct inode *inode = bio_first_page_all(bio)->mapping->host;

	/* Caller should ensure the bio has at least some range added */
	ASSERT(bio->bi_iter.bi_size);

	if (!is_data_inode(inode))
		btrfs_submit_metadata_bio(inode, bio, mirror_num);
	else if (btrfs_op(bio) == BTRFS_MAP_WRITE)
		btrfs_submit_data_write_bio(inode, bio, mirror_num);
	else
		btrfs_submit_data_read_bio(inode, bio, mirror_num, compress_type);

	/*
	 * Above submission hooks will handle the error by ending the bio,
	 * which will do the cleanup properly.  So here we should not return
	 * any error, or the caller of submit_extent_page() will do cleanup
	 * again, causing problems.
	 */
}

/*
 * Submit or fail the current bio in an extent_page_data structure.
 */
static void submit_write_bio(struct extent_page_data *epd, int ret)
{
	struct bio *bio = epd->bio_ctrl.bio;

	if (!bio)
		return;

	if (ret) {
		ASSERT(ret < 0);
		bio->bi_status = errno_to_blk_status(ret);
		bio_endio(bio);
	} else {
		submit_one_bio(bio, 0, 0);
	}

	/* The bio is owned by the bi_end_io handler now */
	epd->bio_ctrl.bio = NULL;
}

int __init extent_state_cache_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;
	return 0;
}

int __init extent_io_init(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto free_buffer_cache;

	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_exit(&btrfs_bioset);

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;
	return -ENOMEM;
}

void __cold extent_state_cache_exit(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

void __cold extent_io_exit(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
	bioset_exit(&btrfs_bioset);
}

/*
 * For the file_extent_tree, we want to hold the inode lock when we lookup and
 * update the disk_i_size, but lockdep will complain because in our io_tree we
 * hold the tree lock and get the inode lock when setting delalloc.  These two
 * things are unrelated, so make a class for the file_extent_tree so we don't
 * get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

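/*
 * Empty an io tree, freeing every extent state still in it.  Callers must
 * ensure nobody else is using the tree or waiting on its states any more.
 */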
void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

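/* Allocate a new extent state and initialize its fields, NULL on failure. */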
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might not be appropriate for the slab allocator,
	 * drop the unsupported bits
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

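/*
 * Insert @node into the state rb tree.  If the caller already computed the
 * insertion slot (@p_in/@parent_in) it is used directly, otherwise we search
 * from @search_start (or the root).  Returns the conflicting node if the
 * offset is already covered by an existing entry, NULL on success.
 */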
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/**
 * Search @tree for an entry that contains @offset. Such entry would have
 * entry->start <= offset && entry->end >= offset.
 *
 * @tree:       the tree to search
 * @offset:     offset that should fall within an entry in @tree
 * @next_ret:   pointer to the first entry whose range ends after @offset
 * @prev_ret:   pointer to the first entry whose range begins before @offset
 * @p_ret:      pointer where new node should be anchored (used when inserting an
 *	        entry in the tree)
 * @parent_ret: points to entry which would have been the parent of the entry
 *              containing @offset
 *
 * This function returns a pointer to the entry that contains @offset byte
 * address. If no such entry exists, then NULL is returned and the other
 * pointer arguments to the function are filled, otherwise the found entry is
 * returned and other pointers are left untouched.
 */
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **next_ret,
				      struct rb_node **prev_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (next_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
		prev = orig_prev;
	}

	if (prev_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
	}
	return NULL;
}

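/*
 * Like __etree_search(), but when no entry contains @offset return the next
 * entry instead, which is where a new state covering @offset would be
 * inserted.
 */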
static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *next = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
	if (!ret)
		return next;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in
 * their state field are not merged because the end_io handlers need to be
 * able to do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
		        struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, u32 *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			u32 *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start) {
		btrfs_err(tree->fs_info,
			"insert state: end < start %llu %llu", end, start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		btrfs_err(tree->fs_info,
		       "found node %llu %llu on insert of %llu %llu",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_split_delalloc_extent(tree->private_data, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

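/* Return the in-tree state immediately after @state, or NULL if none. */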
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    u32 *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_clear_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
	BUG_ON(ret < 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

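/*
 * Make sure we have a preallocated extent state: if the caller's prealloc was
 * already consumed (or never allocated), grab a new one with GFP_ATOMIC.
 */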
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree->fs_info, err,
	"locking error: extent tree was modified by another thread while locked");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, int wake, int delete,
		       struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;

	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

}

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    u32 bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

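/*
 * Set the given bits on an extent state, updating the dirty byte accounting
 * and recording the change in @changeset.
 */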
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   u32 *bits, struct extent_changeset *changeset)
{
	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_set_delalloc_extent(tree->private_data, state, bits);

	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
	BUG_ON(ret < 0);
	state->state |= bits_to_set;
}

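/*
 * Cache @state in @cached_ptr and take an extra reference on it, but only if
 * the state has one of the requested flags set (or when @flags is 0).
 */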
static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_LOCKED | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		   u32 exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask,
		   struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);

	if (exclusive_bits)
		ASSERT(failed_start);
	else
		ASSERT(failed_start == NULL);
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		/*
		 * If this extent already has all the bits we want set, then
		 * skip it, not necessary to split it or do anything with it.
		 */
		if ((state->state & bits) == bits) {
			start = state->end + 1;
			cache_state(state, cached_state);
			goto search_again;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

}

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 *
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
				       clear_bits);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits, NULL);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, NULL);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0,
						NULL);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, NULL);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	cond_resched();
	first_iteration = false;
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}

/* wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset)
{
	/*
	 * We don't support EXTENT_LOCKED yet, as current changeset will
	 * record any bits changed, so for EXTENT_LOCKED case, it will
	 * either fail with -EEXIST or changeset will record the whole
	 * range.
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
			      changeset);
}

int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
			      GFP_NOWAIT, NULL);
}

int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     u32 bits, int wake, int delete,
		     struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, wake, delete,
				  cached, GFP_NOFS, NULL);
}

int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		u32 bits, struct extent_changeset *changeset)
{
	/*
	 * Don't support EXTENT_LOCKED case, same reason as
	 * set_record_extent_bits().
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
				  changeset);
}

/*
 * either insert or lock state struct between start and end; use mask to tell
 * us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached_state)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, GFP_NOFS, NULL);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

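/*
 * Try to lock the range without waiting.  Returns 1 on success.  If part of
 * the range is already locked, any lock taken here is dropped and 0 is
 * returned.
 */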
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, GFP_NOFS, NULL);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL);
		return 0;
	}
	return 1;
}

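/*
 * Clear the page dirty flag on every page in the byte range [start, end].
 * The pages are expected to be present in the page cache.
 */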
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		put_page(page);
		index++;
	}
}

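/*
 * Re-dirty every folio in the byte range [start, end] so that a later
 * writeback pass will pick them up again.
 */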
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct folio *folio;

	while (index <= end_index) {
		folio = filemap_get_folio(mapping, index);
		filemap_dirty_folio(mapping, folio);
		folio_account_redirty(folio);
		index += folio_nr_pages(folio);
		folio_put(folio);
	}
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
static struct extent_state *
find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * Find the first offset in the io tree with one or more @bits set.
 *
 * Note: If there are multiple bits set in @bits, any of them will match.
 *
 * Return 0 if we find something, and update @start_ret and @end_ret.
 * Return 1 if we found nothing.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && extent_state_in_tree(state)) {
			while ((state = next_state(state)) != NULL) {
				if (state->state & bits)
					goto got_it;
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state_if_flags(state, cached_state, 0);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}

/**
 * Find a contiguous area of bits
 *
 * @tree:      io tree to check
 * @start:     offset to start the search from
 * @start_ret: the first offset we found with the bits set
 * @end_ret:   the final contiguous range of the bits that were set
 * @bits:      bits to look for
 *
 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
 * to set bits appropriately, and then merge them again.  During this time it
 * will drop the tree->lock, so use this helper if you want to find the actual
 * contiguous area for given bits.  We will search to the first bit we find, and
 * then walk down the tree until we find a non-contiguous area.  The area
 * returned will be the full contiguous area with the bits set.
 */
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		while ((state = next_state(state)) != NULL) {
			if (state->start > (*end_ret + 1))
				break;
			*end_ret = state->end;
		}
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}

/**
 * Find the first range that has @bits not set. This range could start before
 * @start.
 *
 * @tree:      the tree to search
 * @start:     offset at/after which the found extent should start
 * @start_ret: records the beginning of the range
 * @end_ret:   records the end of the range (inclusive)
 * @bits:      the set of bits which must be unset
 *
 * Since unallocated range is also considered one which doesn't have the bits
 * set, it's possible that @end_ret contains -1; this happens in case the range
 * spans (last_range_end, end of device]. In this case it's up to the caller to
 * trim @end_ret to the appropriate size.
 */
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits)
{
	struct extent_state *state;
	struct rb_node *node, *prev = NULL, *next;

	spin_lock(&tree->lock);

	/* Find first extent with bits cleared */
	while (1) {
		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
		if (!node && !next && !prev) {
			/*
			 * Tree is completely empty, send full range and let
			 * caller deal with it
			 */
			*start_ret = 0;
			*end_ret = -1;
			goto out;
		} else if (!node && !next) {
			/*
			 * We are past the last allocated chunk, set start at
			 * the end of the last extent.
			 */
			state = rb_entry(prev, struct extent_state, rb_node);
			*start_ret = state->end + 1;
			*end_ret = -1;
			goto out;
		} else if (!node) {
			node = next;
		}
		/*
		 * At this point 'node' either contains 'start' or start is
		 * before 'node'
		 */
		state = rb_entry(node, struct extent_state, rb_node);

		if (in_range(start, state->start, state->end - state->start + 1)) {
			if (state->state & bits) {
				/*
				 * |--range with bits sets--|
				 *    |
				 *    start
				 */
				start = state->end + 1;
			} else {
				/*
				 * 'start' falls within a range that doesn't
				 * have the bits set, so take its start as
				 * the beginning of the desired range
				 *
				 * |--range with bits cleared----|
				 *      |
				 *      start
				 */
				*start_ret = state->start;
				break;
			}
		} else {
			/*
			 * |---prev range---|---hole/unset---|---node range---|
			 *                          |
			 *                        start
			 *
			 *                        or
			 *
			 * |---hole/unset--||--first node--|
			 * 0   |
			 *    start
			 */
			if (prev) {
				state = rb_entry(prev, struct extent_state,
						 rb_node);
				*start_ret = state->end + 1;
			} else {
				*start_ret = 0;
			}
			break;
		}
	}

	/*
	 * Find the longest stretch from start until an entry which has the
	 * bits set
	 */
	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && !(state->state & bits)) {
			*end_ret = state->end;
		} else {
			*end_ret = state->start - 1;
			break;
		}

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * true is returned if we find something, false if nothing was in the tree
 */
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	bool found = false;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			refcount_inc(&state->refs);
		}
		found = true;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

/*
 * Process one page for __process_pages_contig().
 *
 * Return >0 if we hit @page == @locked_page.
 * Return 0 if we updated the page status.
 * Return -EAGAIN if we need to try again.
 * (For the PAGE_LOCK case, when the page is dirty or no longer belongs to
 *  the mapping.)
 */
static int process_one_page(struct btrfs_fs_info *fs_info,
			    struct address_space *mapping,
			    struct page *page, struct page *locked_page,
			    unsigned long page_ops, u64 start, u64 end)
{
	u32 len;

	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
	len = end + 1 - start;

	if (page_ops & PAGE_SET_ORDERED)
		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
	if (page_ops & PAGE_SET_ERROR)
		btrfs_page_clamp_set_error(fs_info, page, start, len);
	if (page_ops & PAGE_START_WRITEBACK) {
		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
	}
	if (page_ops & PAGE_END_WRITEBACK)
		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);

	if (page == locked_page)
		return 1;

	if (page_ops & PAGE_LOCK) {
		int ret;

		ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
		if (ret)
			return ret;
		if (!PageDirty(page) || page->mapping != mapping) {
			btrfs_page_end_writer_lock(fs_info, page, start, len);
			return -EAGAIN;
		}
	}
	if (page_ops & PAGE_UNLOCK)
		btrfs_page_end_writer_lock(fs_info, page, start, len);
	return 0;
}

static int __process_pages_contig(struct address_space *mapping,
				  struct page *locked_page,
				  u64 start, u64 end, unsigned long page_ops,
				  u64 *processed_end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
	pgoff_t start_index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	pgoff_t index = start_index;
	unsigned long nr_pages = end_index - start_index + 1;
	unsigned long pages_processed = 0;
	struct page *pages[16];
	int err = 0;
	int i;

	if (page_ops & PAGE_LOCK) {
		ASSERT(page_ops == PAGE_LOCK);
		ASSERT(processed_end && *processed_end == start);
	}

	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
		mapping_set_error(mapping, -EIO);

	while (nr_pages > 0) {
		int found_pages;

		found_pages = find_get_pages_contig(mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (found_pages == 0) {
			/*
			 * Only if we're going to lock these pages, we can find
			 * nothing at @index.
			 */
			ASSERT(page_ops & PAGE_LOCK);
			err = -EAGAIN;
			goto out;
		}

		for (i = 0; i < found_pages; i++) {
			int process_ret;

			process_ret = process_one_page(fs_info, mapping,
					pages[i], locked_page, page_ops,
					start, end);
			if (process_ret < 0) {
				for (; i < found_pages; i++)
					put_page(pages[i]);
				err = -EAGAIN;
				goto out;
			}
			put_page(pages[i]);
			pages_processed++;
		}
		nr_pages -= found_pages;
		index += found_pages;
		cond_resched();
	}
out:
	if (err && processed_end) {
		/*
		 * Update @processed_end. This is awkward because it mixes two
		 * return value patterns (inclusive vs exclusive).
		 *
		 * But the exclusive pattern is necessary if @start is 0, or we
		 * would underflow and the check against @processed_end would
		 * not work as expected.
		 */
		if (pages_processed)
			*processed_end = min(end,
			((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
		else
			*processed_end = start;
	}
	return err;
}
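
/*
 * Editor's worked example for the @processed_end math above (not part of
 * the original source): with 4K pages, start == 8192 and end == 32767 give
 * start_index == 2.  If two pages are processed before an -EAGAIN, then
 * processed_end = ((u64)(2 + 2) << PAGE_SHIFT) - 1 = 16383, i.e. the last
 * byte of the last fully processed page, clamped to @end.
 */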

static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;

	ASSERT(locked_page);
	if (index == locked_page->index && end_index == index)
		return;

	__process_pages_contig(inode->i_mapping, locked_page, start, end,
			       PAGE_UNLOCK, NULL);
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_SHIFT;
	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
	u64 processed_end = delalloc_start;
	int ret;

	ASSERT(locked_page);
	if (index == locked_page->index && index == end_index)
		return 0;

	ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
				     delalloc_end, PAGE_LOCK, &processed_end);
	if (ret == -EAGAIN && processed_end > delalloc_start)
		__unlock_for_delalloc(inode, locked_page, delalloc_start,
				      processed_end);
	return ret;
}

/*
 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 * more than @max_bytes.
 *
 * @start:	The original start bytenr to search.
 *		Will store the extent range start bytenr.
 * @end:	The original end bytenr of the search range
 *		Will store the extent range end bytenr.
 *
 * Return true if we find a delalloc range which starts inside the original
 * range, and @start/@end will store the delalloc range start/end.
 *
 * Return false if we can't find any delalloc range which starts inside the
 * original range, and @start/@end will be the non-delalloc range start/end.
 */
EXPORT_FOR_TESTS
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
				    struct page *locked_page, u64 *start,
				    u64 *end)
{
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	const u64 orig_start = *start;
	const u64 orig_end = *end;
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 delalloc_start;
	u64 delalloc_end;
	bool found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

	/* Caller should pass a valid @end to indicate the search range end */
	ASSERT(orig_end > orig_start);

	/* The range should at least cover part of the page */
	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
		 orig_end <= page_offset(locked_page)));
again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
					  max_bytes, &cached_state);
	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
		*start = delalloc_start;

		/* @delalloc_end can be -1, never go beyond @orig_end */
		*end = min(delalloc_end, orig_end);
		free_extent_state(cached_state);
		return false;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes)
		delalloc_end = delalloc_start + max_bytes - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	ASSERT(!ret || ret == -EAGAIN);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		cached_state = NULL;
		if (!loops) {
			max_bytes = PAGE_SIZE;
			loops = 1;
			goto again;
		} else {
			found = false;
			goto out_failed;
		}
	}

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
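
/*
 * Editor's illustrative example (not part of the original source), assuming
 * 4K pages: if @locked_page covers file range [8192, 12287], *start == 8192
 * and *end == 1048575, a successful call narrows *start/*end to the locked
 * delalloc range (e.g. [8192, 65535]) with its pages and extent bits locked.
 * A false return leaves [*start, *end] describing the non-delalloc gap
 * instead, as documented above.
 */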

void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  u32 clear_bits, unsigned long page_ops)
{
	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);

	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
			       start, end, page_ops, NULL);
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     u32 bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (WARN_ON(search_end <= cur_start))
		return 0;

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
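
/*
 * Editor's illustrative example (not part of the original source): with two
 * EXTENT_DELALLOC ranges [0, 4095] and [8192, 12287], calling
 * count_range_bits(tree, &start, (u64)-1, (u64)-1, EXTENT_DELALLOC, 1) with
 * start == 0 returns 4096, since the contig flag stops counting at the
 * first gap; the same call with contig == 0 returns 8192.
 */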

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record *failrec)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->failrec = failrec;
out:
	spin_unlock(&tree->lock);
	return ret;
}

struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
{
	struct rb_node *node;
	struct extent_state *state;
	struct io_failure_record *failrec;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		failrec = ERR_PTR(-ENOENT);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		failrec = ERR_PTR(-ENOENT);
		goto out;
	}

	failrec = state->failrec;
out:
	spin_unlock(&tree->lock);
	return failrec;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
	    cached->end > start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}
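
/*
 * Editor's illustrative example (not part of the original source): if only
 * [0, 4095] has EXTENT_UPTODATE set, then
 * test_range_bit(tree, 0, 8191, EXTENT_UPTODATE, 1, NULL) returns 0 because
 * the whole range is not covered, while the same call with filled == 0
 * returns 1 because at least part of the range has the bit set.
 */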

int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec)
{
	int ret;
	int err = 0;

	set_state_failrec(failure_tree, rec->start, NULL);
	ret = clear_extent_bits(failure_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_LOCKED | EXTENT_DIRTY);
	if (ret)
		err = ret;

	ret = clear_extent_bits(io_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_DAMAGED);
	if (ret && !err)
		err = ret;

	kfree(rec);
	return err;
}

/*
 * this bypasses the standard btrfs submit functions deliberately, as
 * the standard behavior is to write all copies in a raid setup. here we only
 * want to write the one bad copy. so we do the mapping for ourselves and issue
 * submit_bio directly.
 * to avoid any synchronization issues, wait for the data after writing, which
 * actually prevents the read that triggered the error from finishing.
 * currently, there can be no more than two copies of every data bit. thus,
 * exactly one rewrite is required.
 */
static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			     u64 length, u64 logical, struct page *page,
			     unsigned int pg_offset, int mirror_num)
{
	struct btrfs_device *dev;
	struct bio_vec bvec;
	struct bio bio;
	u64 map_length = 0;
	u64 sector;
	struct btrfs_io_context *bioc = NULL;
	int ret = 0;

	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
	BUG_ON(!mirror_num);

	if (btrfs_repair_one_zone(fs_info, logical))
		return 0;

	map_length = length;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated to its stripes that don't go away while we are doing the
	 * read repair operation.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
		/*
		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
		 * to update all raid stripes, but here we just want to correct
		 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
		 * stripe's dev and sector.
		 */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
				      &map_length, &bioc, 0);
		if (ret)
			goto out_counter_dec;
		ASSERT(bioc->mirror_num == 1);
	} else {
		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
				      &map_length, &bioc, mirror_num);
		if (ret)
			goto out_counter_dec;
		BUG_ON(mirror_num != bioc->mirror_num);
	}

	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
	dev = bioc->stripes[bioc->mirror_num - 1].dev;
	btrfs_put_bioc(bioc);

	if (!dev || !dev->bdev ||
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		ret = -EIO;
		goto out_counter_dec;
	}

	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, length, pg_offset);

	btrfsic_check_bio(&bio);
	ret = submit_bio_wait(&bio);
	if (ret) {
		/* try to remap that extent elsewhere? */
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		goto out_bio_uninit;
	}

	btrfs_info_rl_in_rcu(fs_info,
		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
				  ino, start,
				  rcu_str_deref(dev->name), sector);
	ret = 0;

out_bio_uninit:
	bio_uninit(&bio);
out_counter_dec:
	btrfs_bio_counter_dec(fs_info);
	return ret;
}

int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 start = eb->start;
	int i, num_pages = num_extent_pages(eb);
	int ret = 0;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	for (i = 0; i < num_pages; i++) {
		struct page *p = eb->pages[i];

		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
					start - page_offset(p), mirror_num);
		if (ret)
			break;
		start += PAGE_SIZE;
	}

	return ret;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset)
{
	u64 private;
	struct io_failure_record *failrec;
	struct extent_state *state;
	int num_copies;
	int ret;

	private = 0;
	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
			       EXTENT_DIRTY, 0);
	if (!ret)
		return 0;

	failrec = get_state_failrec(failure_tree, start);
	if (IS_ERR(failrec))
		return 0;

	BUG_ON(!failrec->this_mirror);

	if (sb_rdonly(fs_info->sb))
		goto out;

	spin_lock(&io_tree->lock);
	state = find_first_extent_bit_state(io_tree,
					    failrec->start,
					    EXTENT_LOCKED);
	spin_unlock(&io_tree->lock);

	if (state && state->start <= failrec->start &&
	    state->end >= failrec->start + failrec->len - 1) {
		num_copies = btrfs_num_copies(fs_info, failrec->logical,
					      failrec->len);
		if (num_copies > 1)  {
			repair_io_failure(fs_info, ino, start, failrec->len,
					  failrec->logical, page, pg_offset,
					  failrec->failed_mirror);
		}
	}

out:
	free_io_failure(failure_tree, io_tree, failrec);

	return 0;
}

/*
 * Can be called when
 * - holding the extent lock
 * - under an ordered extent
 * - the inode is being freed
 */
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
	struct io_failure_record *failrec;
	struct extent_state *state, *next;

	if (RB_EMPTY_ROOT(&failure_tree->state))
		return;

	spin_lock(&failure_tree->lock);
	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
	while (state) {
		if (state->start > end)
			break;

		ASSERT(state->end <= end);

		next = next_state(state);

		failrec = state->failrec;
		free_extent_state(state);
		kfree(failrec);

		state = next;
	}
	spin_unlock(&failure_tree->lock);
}

static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
							     u64 start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct io_failure_record *failrec;
	struct extent_map *em;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	const u32 sectorsize = fs_info->sectorsize;
	int ret;
	u64 logical;

	failrec = get_state_failrec(failure_tree, start);
	if (!IS_ERR(failrec)) {
		btrfs_debug(fs_info,
	"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
			failrec->logical, failrec->start, failrec->len);
		/*
		 * when data can be on disk more than twice, add to failrec here
		 * (e.g. with a list for failed_mirror) to make
		 * clean_io_failure() clean all those errors at once.
		 */

		return failrec;
	}

	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
	if (!failrec)
		return ERR_PTR(-ENOMEM);

	failrec->start = start;
	failrec->len = sectorsize;
	failrec->this_mirror = 0;
	failrec->compress_type = BTRFS_COMPRESS_NONE;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, failrec->len);
	if (!em) {
		read_unlock(&em_tree->lock);
		kfree(failrec);
		return ERR_PTR(-EIO);
	}

	if (em->start > start || em->start + em->len <= start) {
		free_extent_map(em);
		em = NULL;
	}
	read_unlock(&em_tree->lock);
	if (!em) {
		kfree(failrec);
		return ERR_PTR(-EIO);
	}

	logical = start - em->start;
	logical = em->block_start + logical;
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		logical = em->block_start;
		failrec->compress_type = em->compress_type;
	}

	btrfs_debug(fs_info,
		    "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
		    logical, start, failrec->len);

	failrec->logical = logical;
	free_extent_map(em);

	/* Set the bits in the private failure tree */
	ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
			      EXTENT_LOCKED | EXTENT_DIRTY);
	if (ret >= 0) {
		ret = set_state_failrec(failure_tree, start, failrec);
		/* Set the bits in the inode's tree */
		ret = set_extent_bits(tree, start, start + sectorsize - 1,
				      EXTENT_DAMAGED);
	} else if (ret < 0) {
		kfree(failrec);
		return ERR_PTR(ret);
	}

	return failrec;
}

static bool btrfs_check_repairable(struct inode *inode,
				   struct io_failure_record *failrec,
				   int failed_mirror)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int num_copies;

	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
	if (num_copies == 1) {
		/*
		 * we only have a single copy of the data, so don't bother with
		 * all the retry and error correction code that follows. no
		 * matter what the error is, it is very likely to persist.
		 */
		btrfs_debug(fs_info,
			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
			num_copies, failrec->this_mirror, failed_mirror);
		return false;
	}

	/* The failure record should only contain one sector */
	ASSERT(failrec->len == fs_info->sectorsize);

	/*
	 * There are two premises:
	 * a) deliver good data to the caller
	 * b) correct the bad sectors on disk
	 *
	 * Since we're only doing repair for one sector, we only need to get
	 * a good copy of the failed sector and if we succeed, we have setup
	 * everything for repair_io_failure to do the rest for us.
	 */
	ASSERT(failed_mirror);
	failrec->failed_mirror = failed_mirror;
	failrec->this_mirror++;
	if (failrec->this_mirror == failed_mirror)
		failrec->this_mirror++;

	if (failrec->this_mirror > num_copies) {
		btrfs_debug(fs_info,
			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
			num_copies, failrec->this_mirror, failed_mirror);
		return false;
	}

	return true;
}

int btrfs_repair_one_sector(struct inode *inode,
			    struct bio *failed_bio, u32 bio_offset,
			    struct page *page, unsigned int pgoff,
			    u64 start, int failed_mirror,
			    submit_bio_hook_t *submit_bio_hook)
{
	struct io_failure_record *failrec;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio);
	const int icsum = bio_offset >> fs_info->sectorsize_bits;
	struct bio *repair_bio;
	struct btrfs_bio *repair_bbio;

	btrfs_debug(fs_info,
		   "repair read error: read error at %llu", start);

	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

	failrec = btrfs_get_io_failure_record(inode, start);
	if (IS_ERR(failrec))
		return PTR_ERR(failrec);

	if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
		free_io_failure(failure_tree, tree, failrec);
		return -EIO;
	}

	repair_bio = btrfs_bio_alloc(1);
	repair_bbio = btrfs_bio(repair_bio);
	repair_bbio->file_offset = start;
	repair_bio->bi_opf = REQ_OP_READ;
	repair_bio->bi_end_io = failed_bio->bi_end_io;
	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
	repair_bio->bi_private = failed_bio->bi_private;

	if (failed_bbio->csum) {
		const u32 csum_size = fs_info->csum_size;

		repair_bbio->csum = repair_bbio->csum_inline;
		memcpy(repair_bbio->csum,
		       failed_bbio->csum + csum_size * icsum, csum_size);
	}

	bio_add_page(repair_bio, page, failrec->len, pgoff);
	repair_bbio->iter = repair_bio->bi_iter;

	btrfs_debug(btrfs_sb(inode->i_sb),
		    "repair read error: submitting new read to mirror %d",
		    failrec->this_mirror);

	/*
	 * At this point we have a bio, so any errors from submit_bio_hook()
	 * will be handled by the endio on the repair_bio, so we can't return an
	 * error here.
	 */
	submit_bio_hook(inode, repair_bio, failrec->this_mirror, failrec->compress_type);
	return BLK_STS_OK;
}

static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);

	ASSERT(page_offset(page) <= start &&
	       start + len <= page_offset(page) + PAGE_SIZE);

	if (uptodate) {
		if (fsverity_active(page->mapping->host) &&
		    !PageError(page) &&
		    !PageUptodate(page) &&
		    start < i_size_read(page->mapping->host) &&
		    !fsverity_verify_page(page)) {
			btrfs_page_set_error(fs_info, page, start, len);
		} else {
			btrfs_page_set_uptodate(fs_info, page, start, len);
		}
	} else {
		btrfs_page_clear_uptodate(fs_info, page, start, len);
		btrfs_page_set_error(fs_info, page, start, len);
	}

	if (!btrfs_is_subpage(fs_info, page))
		unlock_page(page);
	else
		btrfs_subpage_end_reader(fs_info, page, start, len);
}

static void end_sector_io(struct page *page, u64 offset, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct extent_state *cached = NULL;

	end_page_read(page, uptodate, offset, sectorsize);
	if (uptodate)
		set_extent_uptodate(&inode->io_tree, offset,
				    offset + sectorsize - 1, &cached, GFP_ATOMIC);
	unlock_extent_cached_atomic(&inode->io_tree, offset,
				    offset + sectorsize - 1, &cached);
}

static void submit_data_read_repair(struct inode *inode, struct bio *failed_bio,
				    u32 bio_offset, const struct bio_vec *bvec,
				    int failed_mirror, unsigned int error_bitmap)
{
	const unsigned int pgoff = bvec->bv_offset;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct page *page = bvec->bv_page;
	const u64 start = page_offset(bvec->bv_page) + bvec->bv_offset;
	const u64 end = start + bvec->bv_len - 1;
	const u32 sectorsize = fs_info->sectorsize;
	const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
	int i;

	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

	/* This repair is only for data */
	ASSERT(is_data_inode(inode));

	/* We're here because we had some read errors or csum mismatch */
	ASSERT(error_bitmap);

	/*
	 * We only get called on buffered IO, thus page must be mapped and bio
	 * must not be cloned.
	 */
	ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));

	/* Iterate through all the sectors in the range */
	for (i = 0; i < nr_bits; i++) {
		const unsigned int offset = i * sectorsize;
		bool uptodate = false;
		int ret;

		if (!(error_bitmap & (1U << i))) {
			/*
			 * This sector has no error, just end the page read
			 * and unlock the range.
			 */
			uptodate = true;
			goto next;
		}

		ret = btrfs_repair_one_sector(inode, failed_bio,
				bio_offset + offset,
				page, pgoff + offset, start + offset,
				failed_mirror, btrfs_submit_data_read_bio);
		if (!ret) {
			/*
			 * We have submitted the read repair, the page release
			 * will be handled by the endio function of the
			 * submitted repair bio.
			 * Thus we don't need to do anything here.
			 */
			continue;
		}
		/*
		 * Continue on failed repair, otherwise the remaining sectors
		 * will not be properly unlocked.
		 */
next:
		end_sector_io(page, start + offset, uptodate);
	}
}

/* lots and lots of room for performance fixes in the end_bio funcs */

void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
	struct btrfs_inode *inode;
	const bool uptodate = (err == 0);
	int ret = 0;

	ASSERT(page && page->mapping);
	inode = BTRFS_I(page->mapping->host);
	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);

	if (!uptodate) {
		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
		u32 len;

		ASSERT(end + 1 - start <= U32_MAX);
		len = end + 1 - start;

		btrfs_page_clear_uptodate(fs_info, page, start, len);
		btrfs_page_set_error(fs_info, page, start, len);
		ret = err < 0 ? err : -EIO;
		mapping_set_error(page->mapping, ret);
	}
}

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	u64 start;
	u64 end;
	struct bvec_iter_all iter_all;
	bool first_bvec = true;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
		const u32 sectorsize = fs_info->sectorsize;

		/* Our read/write should always be sector aligned. */
		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
			btrfs_err(fs_info,
		"partial page write in btrfs with offset %u and length %u",
				  bvec->bv_offset, bvec->bv_len);
		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
			btrfs_info(fs_info,
		"incomplete page write with offset %u and length %u",
				   bvec->bv_offset, bvec->bv_len);

		start = page_offset(page) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (first_bvec) {
			btrfs_record_physical_zoned(inode, start, bio);
			first_bvec = false;
		}

		end_extent_writepage(page, error, start, end);

		btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
	}

	bio_put(bio);
}

/*
 * Record previously processed extent range
 *
 * For endio_readpage_release_extent() to handle a full extent range, reducing
 * the extent io operations.
 */
struct processed_extent {
	struct btrfs_inode *inode;
	/* Start of the range in @inode */
	u64 start;
	/* End of the range in @inode */
	u64 end;
	bool uptodate;
};

/*
 * Try to release processed extent range
 *
 * May not release the extent range right now if the current range is
 * contiguous to processed extent.
 *
 * Will release the processed extent when @inode or @uptodate changes, or
 * when the new range is no longer contiguous to the processed range.
 *
 * Passing @inode == NULL will force processed extent to be released.
 */
static void endio_readpage_release_extent(struct processed_extent *processed,
			      struct btrfs_inode *inode, u64 start, u64 end,
			      bool uptodate)
{
	struct extent_state *cached = NULL;
	struct extent_io_tree *tree;

	/* The first extent, initialize @processed */
	if (!processed->inode)
		goto update;

	/*
	 * Contiguous to processed extent, just uptodate the end.
	 *
	 * Several things to notice:
	 *
	 * - bio can be merged as long as on-disk bytenr is contiguous
	 *   This means we can have page belonging to other inodes, thus need to
	 *   check if the inode still matches.
	 * - bvec can contain range beyond current page for multi-page bvec
	 *   Thus we need to do processed->end + 1 >= start check
	 */
	if (processed->inode == inode && processed->uptodate == uptodate &&
	    processed->end + 1 >= start && end >= processed->end) {
		processed->end = end;
		return;
	}

	tree = &processed->inode->io_tree;
	/*
	 * Now we don't have range contiguous to the processed range, release
	 * the processed range now.
	 */
	if (processed->uptodate && tree->track_uptodate)
		set_extent_uptodate(tree, processed->start, processed->end,
				    &cached, GFP_ATOMIC);
	unlock_extent_cached_atomic(tree, processed->start, processed->end,
				    &cached);

update:
	/* Update processed to current range */
	processed->inode = inode;
	processed->start = start;
	processed->end = end;
	processed->uptodate = uptodate;
}

static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
{
	ASSERT(PageLocked(page));
	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page));
	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
}

/*
 * Find extent buffer for a given bytenr.
 *
 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
 * in endio context.
 */
static struct extent_buffer *find_extent_buffer_readpage(
		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
{
	struct extent_buffer *eb;

	/*
	 * For regular sectorsize, we can use page->private to grab extent
	 * buffer
	 */
	if (fs_info->nodesize >= PAGE_SIZE) {
		ASSERT(PagePrivate(page) && page->private);
		return (struct extent_buffer *)page->private;
	}

	/* For subpage case, we need to lookup buffer radix tree */
	rcu_read_lock();
	eb = radix_tree_lookup(&fs_info->buffer_radix,
			       bytenr >> fs_info->sectorsize_bits);
	rcu_read_unlock();
	ASSERT(eb);
	return eb;
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct extent_io_tree *tree, *failure_tree;
	struct processed_extent processed = { 0 };
	/*
	 * The offset to the beginning of a bio, since one bio can never be
	 * larger than UINT_MAX, u32 here is enough.
	 */
	u32 bio_offset = 0;
	int mirror;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, iter_all) {
		bool uptodate = !bio->bi_status;
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
		const u32 sectorsize = fs_info->sectorsize;
		unsigned int error_bitmap = (unsigned int)-1;
		bool repair = false;
		u64 start;
		u64 end;
		u32 len;

		btrfs_debug(fs_info,
			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
			bio->bi_iter.bi_sector, bio->bi_status,
			bbio->mirror_num);
		tree = &BTRFS_I(inode)->io_tree;
		failure_tree = &BTRFS_I(inode)->io_failure_tree;

		/*
		 * We always issue full-sector reads, but if some block in a
		 * page fails to read, blk_update_request() will advance
		 * bv_offset and adjust bv_len to compensate.  Print a warning
		 * for unaligned offsets, and an error if they don't add up to
		 * a full sector.
		 */
		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
			btrfs_err(fs_info,
		"partial page read in btrfs with offset %u and length %u",
				  bvec->bv_offset, bvec->bv_len);
		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
				     sectorsize))
			btrfs_info(fs_info,
		"incomplete page read with offset %u and length %u",
				   bvec->bv_offset, bvec->bv_len);

		start = page_offset(page) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;
		len = bvec->bv_len;

		mirror = bbio->mirror_num;
		if (likely(uptodate)) {
			if (is_data_inode(inode)) {
				error_bitmap = btrfs_verify_data_csum(bbio,
						bio_offset, page, start, end);
				if (error_bitmap)
					uptodate = false;
			} else {
				if (btrfs_validate_metadata_buffer(bbio,
						page, start, end, mirror))
					uptodate = false;
			}
		}

		if (likely(uptodate)) {
			loff_t i_size = i_size_read(inode);
			pgoff_t end_index = i_size >> PAGE_SHIFT;

			clean_io_failure(BTRFS_I(inode)->root->fs_info,
					 failure_tree, tree, start, page,
					 btrfs_ino(BTRFS_I(inode)), 0);

			/*
			 * Zero out the remaining part if this range straddles
			 * i_size.
			 *
			 * Here we should only zero the range inside the bvec,
			 * not touch anything else.
			 *
			 * NOTE: i_size is exclusive while end is inclusive.
			 */
			if (page->index == end_index && i_size <= end) {
				u32 zero_start = max(offset_in_page(i_size),
						     offset_in_page(start));

				zero_user_segment(page, zero_start,
						  offset_in_page(end) + 1);
			}
		} else if (is_data_inode(inode)) {
			/*
			 * Only try to repair bios that actually made it to a
			 * device.  If the bio failed to be submitted mirror
			 * is 0 and we need to fail it without retrying.
			 */
			if (mirror > 0)
				repair = true;
		} else {
			struct extent_buffer *eb;

			eb = find_extent_buffer_readpage(fs_info, page, start);
			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
			eb->read_mirror = mirror;
			atomic_dec(&eb->io_pages);
		}

		if (repair) {
			/*
			 * submit_data_read_repair() will handle all the good
			 * and bad sectors, we just continue to the next bvec.
			 */
			submit_data_read_repair(inode, bio, bio_offset, bvec,
						mirror, error_bitmap);
		} else {
			/* Update page status and unlock */
			end_page_read(page, uptodate, start, len);
			endio_readpage_release_extent(&processed, BTRFS_I(inode),
					start, end, PageUptodate(page));
		}

		ASSERT(bio_offset + len > bio_offset);
		bio_offset += len;

	}
	/* Release the last extent */
	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
	btrfs_bio_free_csum(bbio);
	bio_put(bio);
}

/**
 * Populate every free slot in a provided array with pages.
 *
 * @nr_pages:   number of pages to allocate
 * @page_array: the array to fill with pages; any existing non-null entries in
 * 		the array will be skipped
 *
 * Return: 0        if all pages were able to be allocated;
 *         -ENOMEM  otherwise, and the caller is responsible for freeing all
 *                  non-null page pointers in the array.
 */
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
{
	unsigned int allocated;

	for (allocated = 0; allocated < nr_pages;) {
		unsigned int last = allocated;

		allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);

		if (allocated == nr_pages)
			return 0;

		/*
		 * During this iteration, no page could be allocated, even
		 * though alloc_pages_bulk_array() falls back to alloc_page()
		 * if it could not bulk-allocate. So we must be out of memory.
		 */
		if (allocated == last)
			return -ENOMEM;

		memalloc_retry_wait(GFP_NOFS);
	}
	return 0;
}
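
/*
 * Editor's usage sketch (hypothetical caller, not part of the original
 * source): allocate four pages and release them on failure, as the
 * kernel-doc above requires.
 *
 *	struct page *pages[4] = { NULL };
 *	int i;
 *
 *	if (btrfs_alloc_page_array(ARRAY_SIZE(pages), pages)) {
 *		for (i = 0; i < ARRAY_SIZE(pages); i++)
 *			if (pages[i])
 *				__free_page(pages[i]);
 *		return -ENOMEM;
 *	}
 */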

/*
 * Initialize the members up to but not including 'bio'. Use after allocating a
 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
 * 'bio' because use of __GFP_ZERO is not supported.
 */
static inline void btrfs_bio_init(struct btrfs_bio *bbio)
{
	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
}

/*
 * Allocate a btrfs_io_bio, with @nr_iovecs as maximum number of iovecs.
 *
 * The bio allocation is backed by bioset and does not fail.
 */
struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
{
	struct bio *bio;

	ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
	bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset);
	btrfs_bio_init(btrfs_bio(bio));
	return bio;
}

struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
{
	struct bio *bio;
	struct btrfs_bio *bbio;

	ASSERT(offset <= UINT_MAX && size <= UINT_MAX);

	/* this will never fail when it's backed by a bioset */
	bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
	ASSERT(bio);

	bbio = btrfs_bio(bio);
	btrfs_bio_init(bbio);

	bio_trim(bio, offset >> 9, size >> 9);
	bbio->iter = bio->bi_iter;
	return bio;
}

/**
 * Attempt to add a page to bio
 *
 * @bio_ctrl:	record both the bio, and its bio_flags
 * @page:	page to add to the bio
 * @disk_bytenr:  offset of the new bio or to check whether we are adding
 *                a contiguous page to the previous one
 * @size:	portion of page that we want to write
 * @pg_offset:	starting offset in the page
 * @compress_type:   compression type of the current bio to see if we can merge them
 *
 * Attempt to add a page to bio considering stripe alignment etc.
 *
 * Return >= 0 for the number of bytes added to the bio.
 * Can return 0 if the current bio is already at stripe/zone boundary.
 * Return <0 for error.
 */
static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
			      struct page *page,
			      u64 disk_bytenr, unsigned int size,
			      unsigned int pg_offset,
			      enum btrfs_compression_type compress_type)
{
	struct bio *bio = bio_ctrl->bio;
	u32 bio_size = bio->bi_iter.bi_size;
	u32 real_size;
	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
	bool contig;
	int ret;

	ASSERT(bio);
	/* The limit should be calculated when bio_ctrl->bio is allocated */
	ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
	if (bio_ctrl->compress_type != compress_type)
		return 0;

	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
		contig = bio->bi_iter.bi_sector == sector;
	else
		contig = bio_end_sector(bio) == sector;
	if (!contig)
		return 0;

	real_size = min(bio_ctrl->len_to_oe_boundary,
			bio_ctrl->len_to_stripe_boundary) - bio_size;
	real_size = min(real_size, size);

	/*
	 * If real_size is 0, never call bio_add_*_page(), as even with a size
	 * of 0 the bio would still execute its endio function on the page!
	 */
	if (real_size == 0)
		return 0;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
		ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
	else
		ret = bio_add_page(bio, page, real_size, pg_offset);

	return ret;
}
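
/*
 * Editor's worked example (illustrative, not part of the original source):
 * with len_to_stripe_boundary == 64K, len_to_oe_boundary == 48K and a bio
 * already holding 44K, a request to add 16K is clamped to
 * min(48K, 64K) - 44K = 4K, so only 4K is added; the caller then submits
 * the bio and retries the remaining 12K in a new one.
 */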

static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
			       struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_io_geometry geom;
	struct btrfs_ordered_extent *ordered;
	struct extent_map *em;
	u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
	int ret;

	/*
	 * Pages for compressed extent are never submitted to disk directly,
	 * thus it has no real boundary, just set them to U32_MAX.
	 *
	 * The split happens for real compressed bio, which happens in
	 * btrfs_submit_compressed_read/write().
	 */
	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
		bio_ctrl->len_to_oe_boundary = U32_MAX;
		bio_ctrl->len_to_stripe_boundary = U32_MAX;
		return 0;
	}
	em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
	if (IS_ERR(em))
		return PTR_ERR(em);
	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
				    logical, &geom);
	free_extent_map(em);
	if (ret < 0) {
		return ret;
	}
	if (geom.len > U32_MAX)
		bio_ctrl->len_to_stripe_boundary = U32_MAX;
	else
		bio_ctrl->len_to_stripe_boundary = (u32)geom.len;

	if (bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
		bio_ctrl->len_to_oe_boundary = U32_MAX;
		return 0;
	}

	/* Ordered extent not yet created, so we're good */
	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (!ordered) {
		bio_ctrl->len_to_oe_boundary = U32_MAX;
		return 0;
	}

	bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
		ordered->disk_bytenr + ordered->disk_num_bytes - logical);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

static int alloc_new_bio(struct btrfs_inode *inode,
			 struct btrfs_bio_ctrl *bio_ctrl,
			 struct writeback_control *wbc,
			 unsigned int opf,
			 bio_end_io_t end_io_func,
			 u64 disk_bytenr, u32 offset, u64 file_offset,
			 enum btrfs_compression_type compress_type)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio;
	int ret;

	bio = btrfs_bio_alloc(BIO_MAX_VECS);
	/*
	 * For compressed page range, its disk_bytenr is always @disk_bytenr
	 * passed in, no matter if we have added any range into previous bio.
	 */
	if (compress_type != BTRFS_COMPRESS_NONE)
		bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	else
		bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
	bio_ctrl->bio = bio;
	bio_ctrl->compress_type = compress_type;
	bio->bi_end_io = end_io_func;
	bio->bi_opf = opf;
	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
	if (ret < 0)
		goto error;

	if (wbc) {
		/*
		 * For Zone append we need the correct block_device that we are
		 * going to write to set in the bio to be able to respect the
		 * hardware limitation.  Look it up here:
		 */
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			struct btrfs_device *dev;

			dev = btrfs_zoned_get_device(fs_info, disk_bytenr,
						     fs_info->sectorsize);
			if (IS_ERR(dev)) {
				ret = PTR_ERR(dev);
				goto error;
			}

			bio_set_dev(bio, dev->bdev);
		} else {
			/*
			 * Otherwise pick the last added device to support
			 * cgroup writeback.  For multi-device file systems this
			 * means blk-cgroup policies have to always be set on the
			 * last added/replaced device.  This is a bit odd but has
			 * been like that for a long time.
			 */
			bio_set_dev(bio, fs_info->fs_devices->latest_dev->bdev);
		}
		wbc_init_bio(wbc, bio);
	} else {
		ASSERT(bio_op(bio) != REQ_OP_ZONE_APPEND);
	}
	return 0;
error:
	bio_ctrl->bio = NULL;
	bio->bi_status = errno_to_blk_status(ret);
	bio_endio(bio);
	return ret;
}

/*
 * @opf:	bio REQ_OP_* and REQ_* flags as one value
 * @wbc:	optional writeback control for io accounting
 * @page:	page to add to the bio
 * @disk_bytenr: logical bytenr where the write will be
 * @size:	portion of page that we want to write to
 * @pg_offset:	offset of the new bio or to check whether we are adding
 *              a contiguous page to the previous one
 * @bio_ctrl:	must be a valid pointer, the current bio and its limits are
 *		tracked there and any newly allocated bio is stored there
 * @end_io_func:     end_io callback for new bio
 * @mirror_num:	     desired mirror to read/write
 * @compress_type:   compress type for current bio
 * @force_bio_submit: submit any bio held in @bio_ctrl before adding this page
 */
static int submit_extent_page(unsigned int opf,
			      struct writeback_control *wbc,
			      struct btrfs_bio_ctrl *bio_ctrl,
			      struct page *page, u64 disk_bytenr,
			      size_t size, unsigned long pg_offset,
			      bio_end_io_t end_io_func,
			      int mirror_num,
			      enum btrfs_compression_type compress_type,
			      bool force_bio_submit)
{
	int ret = 0;
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	unsigned int cur = pg_offset;

	ASSERT(bio_ctrl);

	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
	       pg_offset + size <= PAGE_SIZE);
	if (force_bio_submit && bio_ctrl->bio) {
		submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->compress_type);
		bio_ctrl->bio = NULL;
	}

	while (cur < pg_offset + size) {
		u32 offset = cur - pg_offset;
		int added;

		/* Allocate new bio if needed */
		if (!bio_ctrl->bio) {
			ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
					    end_io_func, disk_bytenr, offset,
					    page_offset(page) + cur,
					    compress_type);
			if (ret < 0)
				return ret;
		}
		/*
		 * We must go through btrfs_bio_add_page() to ensure each
		 * page range won't cross various boundaries.
		 */
		if (compress_type != BTRFS_COMPRESS_NONE)
			added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
					size - offset, pg_offset + offset,
					compress_type);
		else
			added = btrfs_bio_add_page(bio_ctrl, page,
					disk_bytenr + offset, size - offset,
					pg_offset + offset, compress_type);

		/* Metadata page range should never be split */
		if (!is_data_inode(&inode->vfs_inode))
			ASSERT(added == 0 || added == size - offset);

		/* At least we added some page, update the account */
		if (wbc && added)
			wbc_account_cgroup_owner(wbc, page, added);

		/* We have reached boundary, submit right now */
		if (added < size - offset) {
			/* The bio should contain some page(s) */
			ASSERT(bio_ctrl->bio->bi_iter.bi_size);
			submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->compress_type);
			bio_ctrl->bio = NULL;
		}
		cur += added;
	}
	return 0;
}
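
/*
 * Editor's note (illustrative, not part of the original source): for a 16K
 * write starting at pg_offset 0 whose bio has only 4K left before an
 * ordered-extent boundary, the loop above adds 4K, submits the bio, lets
 * alloc_new_bio() start a new one on the next iteration, and repeats until
 * the whole 16K has been added.
 */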

static int attach_extent_buffer_page(struct extent_buffer *eb,
				     struct page *page,
				     struct btrfs_subpage *prealloc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;

	/*
	 * If the page is mapped to btree inode, we should hold the private
	 * lock to prevent race.
	 * For cloned or dummy extent buffers, their pages are not mapped and
	 * will not race with any other ebs.
	 */
	if (page->mapping)
		lockdep_assert_held(&page->mapping->private_lock);

	if (fs_info->nodesize >= PAGE_SIZE) {
		if (!PagePrivate(page))
			attach_page_private(page, eb);
		else
			WARN_ON(page->private != (unsigned long)eb);
		return 0;
	}

	/* Already mapped, just free prealloc */
	if (PagePrivate(page)) {
		btrfs_free_subpage(prealloc);
		return 0;
	}

	if (prealloc)
		/* Has preallocated memory for subpage */
		attach_page_private(page, prealloc);
	else
		/* Do new allocation to attach subpage */
		ret = btrfs_attach_subpage(fs_info, page,
					   BTRFS_SUBPAGE_METADATA);
	return ret;
}

int set_page_extent_mapped(struct page *page)
{
	struct btrfs_fs_info *fs_info;

	ASSERT(page->mapping);

	if (PagePrivate(page))
		return 0;

	fs_info = btrfs_sb(page->mapping->host->i_sb);

	if (btrfs_is_subpage(fs_info, page))
		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);

	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
	return 0;
}

void clear_page_extent_mapped(struct page *page)
{
	struct btrfs_fs_info *fs_info;

	ASSERT(page->mapping);

	if (!PagePrivate(page))
		return;

	fs_info = btrfs_sb(page->mapping->host->i_sb);
	if (btrfs_is_subpage(fs_info, page))
		return btrfs_detach_subpage(fs_info, page);

	detach_page_private(page);
}

3550 3551
static struct extent_map *
__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3552
		 u64 start, u64 len, struct extent_map **em_cached)
3553 3554 3555 3556 3557
{
	struct extent_map *em;

	if (em_cached && *em_cached) {
		em = *em_cached;
3558
		if (extent_map_in_tree(em) && start >= em->start &&
3559
		    start < extent_map_end(em)) {
3560
			refcount_inc(&em->refs);
3561 3562 3563 3564 3565 3566 3567
			return em;
		}

		free_extent_map(em);
		*em_cached = NULL;
	}

3568
	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3569
	if (em_cached && !IS_ERR(em)) {
3570
		BUG_ON(*em_cached);
3571
		refcount_inc(&em->refs);
3572 3573 3574 3575
		*em_cached = em;
	}
	return em;
}
3576 3577 3578 3579
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
3580
 * XXX JDM: This needs looking at to ensure proper page locking
3581
 * return 0 on success, otherwise return error
3582
 */
3583
static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3584
		      struct btrfs_bio_ctrl *bio_ctrl,
3585
		      unsigned int read_flags, u64 *prev_em_start)
3586 3587
{
	struct inode *inode = page->mapping->host;
3588
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
M
Miao Xie 已提交
3589
	u64 start = page_offset(page);
3590
	const u64 end = start + PAGE_SIZE - 1;
3591 3592 3593 3594 3595 3596
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	struct extent_map *em;
3597
	int ret = 0;
3598
	size_t pg_offset = 0;
3599 3600
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;
3601
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3602

3603 3604 3605
	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_extent(tree, start, end);
3606 3607
		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
		unlock_page(page);
3608 3609
		goto out;
	}
3610

3611
	if (page->index == last_byte >> PAGE_SHIFT) {
3612
		size_t zero_offset = offset_in_page(last_byte);
C
Chris Mason 已提交
3613 3614

		if (zero_offset) {
3615
			iosize = PAGE_SIZE - zero_offset;
3616
			memzero_page(page, zero_offset, iosize);
C
Chris Mason 已提交
3617 3618
		}
	}
3619
	begin_page_read(fs_info, page);
3620
	while (cur <= end) {
3621
		unsigned long this_bio_flag = 0;
3622
		bool force_bio_submit = false;
3623
		u64 disk_bytenr;
3624

3625
		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
3626
		if (cur >= last_byte) {
3627 3628
			struct extent_state *cached = NULL;

3629
			iosize = PAGE_SIZE - pg_offset;
3630
			memzero_page(page, pg_offset, iosize);
3631
			set_extent_uptodate(tree, cur, cur + iosize - 1,
3632
					    &cached, GFP_NOFS);
3633
			unlock_extent_cached(tree, cur,
3634
					     cur + iosize - 1, &cached);
3635
			end_page_read(page, true, cur, iosize);
3636 3637
			break;
		}
3638
		em = __get_extent_map(inode, page, pg_offset, cur,
3639
				      end - cur + 1, em_cached);
3640
		if (IS_ERR(em)) {
3641
			unlock_extent(tree, cur, end);
3642
			end_page_read(page, false, cur, end + 1 - cur);
3643
			ret = PTR_ERR(em);
3644 3645 3646 3647 3648 3649
			break;
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

3650 3651
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			this_bio_flag = em->compress_type;
C
Chris Mason 已提交
3652

3653 3654
		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
3655
		iosize = ALIGN(iosize, blocksize);
3656
		if (this_bio_flag != BTRFS_COMPRESS_NONE)
3657
			disk_bytenr = em->block_start;
3658
		else
3659
			disk_bytenr = em->block_start + extent_offset;
3660
		block_start = em->block_start;
Y
Yan Zheng 已提交
3661 3662
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			block_start = EXTENT_MAP_HOLE;
3663 3664 3665

		/*
		 * If we have a file range that points to a compressed extent
3666
		 * and it's followed by a consecutive file range that points
3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699
		 * to the same compressed extent (possibly with a different
		 * offset and/or length, so it either points to the whole extent
		 * or only part of it), we must make sure we do not submit a
		 * single bio to populate the pages for the 2 ranges because
		 * this makes the compressed extent read zero out the pages
		 * belonging to the 2nd range. Imagine the following scenario:
		 *
		 *  File layout
		 *  [0 - 8K]                     [8K - 24K]
		 *    |                               |
		 *    |                               |
		 * points to extent X,         points to extent X,
		 * offset 4K, length of 8K     offset 0, length 16K
		 *
		 * [extent X, compressed length = 4K uncompressed length = 16K]
		 *
		 * If the bio to read the compressed extent covers both ranges,
		 * it will decompress extent X into the pages belonging to the
		 * first range and then it will stop, zeroing out the remaining
		 * pages that belong to the other range that points to extent X.
		 * So here we make sure we submit 2 bios, one for the first
		 * range and another one for the third range. Both will target
		 * the same physical extent from disk, but we can't currently
		 * make the compressed bio endio callback populate the pages
		 * for both ranges because each compressed bio is tightly
		 * coupled with a single extent map, and each range can have
		 * an extent map with a different offset value relative to the
		 * uncompressed data of our extent and different lengths. This
		 * is a corner case so we prioritize correctness over
		 * non-optimal behavior (submitting 2 bios for the same extent).
		 */
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
		    prev_em_start && *prev_em_start != (u64)-1 &&
3700
		    *prev_em_start != em->start)
3701 3702 3703
			force_bio_submit = true;

		if (prev_em_start)
3704
			*prev_em_start = em->start;
3705

3706 3707 3708 3709 3710
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
3711 3712
			struct extent_state *cached = NULL;

3713
			memzero_page(page, pg_offset, iosize);
3714 3715

			set_extent_uptodate(tree, cur, cur + iosize - 1,
3716
					    &cached, GFP_NOFS);
3717
			unlock_extent_cached(tree, cur,
3718
					     cur + iosize - 1, &cached);
3719
			end_page_read(page, true, cur, iosize);
3720
			cur = cur + iosize;
3721
			pg_offset += iosize;
3722 3723 3724
			continue;
		}
		/* the get_extent function already copied into the page */
3725 3726
		if (test_range_bit(tree, cur, cur_end,
				   EXTENT_UPTODATE, 1, NULL)) {
3727
			unlock_extent(tree, cur, cur + iosize - 1);
3728
			end_page_read(page, true, cur, iosize);
3729
			cur = cur + iosize;
3730
			pg_offset += iosize;
3731 3732
			continue;
		}
3733 3734 3735 3736
		/* we have an inline extent but it didn't get marked up
		 * to date.  Error out
		 */
		if (block_start == EXTENT_MAP_INLINE) {
3737
			unlock_extent(tree, cur, cur + iosize - 1);
3738
			end_page_read(page, false, cur, iosize);
3739
			cur = cur + iosize;
3740
			pg_offset += iosize;
3741 3742
			continue;
		}
3743

3744
		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3745 3746
					 bio_ctrl, page, disk_bytenr, iosize,
					 pg_offset,
3747
					 end_bio_extent_readpage, 0,
3748 3749
					 this_bio_flag,
					 force_bio_submit);
3750
		if (ret) {
3751 3752 3753 3754 3755 3756
			/*
			 * We have to unlock the remaining range, or the page
			 * will never be unlocked.
			 */
			unlock_extent(tree, cur, end);
			end_page_read(page, false, cur, end + 1 - cur);
3757
			goto out;
3758
		}
3759
		cur = cur + iosize;
3760
		pg_offset += iosize;
3761
	}
D
Dan Magenheimer 已提交
3762
out:
3763
	return ret;
3764 3765
}

3766
int btrfs_read_folio(struct file *file, struct folio *folio)
3767
{
3768
	struct page *page = &folio->page;
3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	u64 start = page_offset(page);
	u64 end = start + PAGE_SIZE - 1;
	struct btrfs_bio_ctrl bio_ctrl = { 0 };
	int ret;

	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
	/*
	 * If btrfs_do_readpage() failed we will want to submit the assembled
	 * bio to do the cleanup.
	 */
	if (bio_ctrl.bio)
3783
		submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.compress_type);
3784 3785 3786
	return ret;
}

3787
static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3788 3789 3790 3791
					u64 start, u64 end,
					struct extent_map **em_cached,
					struct btrfs_bio_ctrl *bio_ctrl,
					u64 *prev_em_start)
3792
{
3793
	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3794 3795
	int index;

3796
	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3797 3798

	for (index = 0; index < nr_pages; index++) {
3799
		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
3800
				  REQ_RAHEAD, prev_em_start);
3801
		put_page(pages[index]);
3802 3803 3804
	}
}

3805
/*
3806 3807
 * helper for __extent_writepage, doing all of the delayed allocation setup.
 *
3808
 * This returns 1 if btrfs_run_delalloc_range function did all the work required
3809 3810 3811 3812 3813
 * to write the page (copy into inline extent).  In this case the IO has
 * been started and the page is already unlocked.
 *
 * This returns 0 if all went well (page still locked)
 * This returns < 0 if there were errors (page still locked)
3814
 */
3815
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3816
		struct page *page, struct writeback_control *wbc)
3817
{
3818
	const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
3819
	u64 delalloc_start = page_offset(page);
3820
	u64 delalloc_to_write = 0;
3821 3822
	/* How many pages are started by btrfs_run_delalloc_range() */
	unsigned long nr_written = 0;
3823 3824 3825
	int ret;
	int page_started = 0;

3826 3827 3828
	while (delalloc_start < page_end) {
		u64 delalloc_end = page_end;
		bool found;
3829

3830
		found = find_lock_delalloc_range(&inode->vfs_inode, page,
3831
					       &delalloc_start,
3832
					       &delalloc_end);
3833
		if (!found) {
3834 3835 3836
			delalloc_start = delalloc_end + 1;
			continue;
		}
3837
		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3838
				delalloc_end, &page_started, &nr_written, wbc);
3839
		if (ret) {
3840 3841
			btrfs_page_set_error(inode->root->fs_info, page,
					     page_offset(page), PAGE_SIZE);
3842
			return ret;
3843 3844
		}
		/*
3845 3846
		 * delalloc_end is already one less than the total length, so
		 * we don't subtract one from PAGE_SIZE
3847 3848
		 */
		delalloc_to_write += (delalloc_end - delalloc_start +
3849
				      PAGE_SIZE) >> PAGE_SHIFT;
3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860
		delalloc_start = delalloc_end + 1;
	}
	if (wbc->nr_to_write < delalloc_to_write) {
		int thresh = 8192;

		if (delalloc_to_write < thresh * 2)
			thresh = delalloc_to_write;
		wbc->nr_to_write = min_t(u64, delalloc_to_write,
					 thresh);
	}

3861
	/* Did btrfs_run_dealloc_range() already unlock and start the IO? */
3862 3863
	if (page_started) {
		/*
3864 3865
		 * We've unlocked the page, so we can't update the mapping's
		 * writeback index, just update nr_to_write.
3866
		 */
3867
		wbc->nr_to_write -= nr_written;
3868 3869 3870
		return 1;
	}

3871
	return 0;
3872 3873
}

3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892
/*
 * Find the first byte we need to write.
 *
 * For subpage, one page can contain several sectors, and
 * __extent_writepage_io() will just grab all extent maps in the page
 * range and try to submit all non-inline/non-compressed extents.
 *
 * This is a big problem for subpage, we shouldn't re-submit already written
 * data at all.
 * This function will lookup subpage dirty bit to find which range we really
 * need to submit.
 *
 * Return the next dirty range in [@start, @end).
 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
 */
static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
				 struct page *page, u64 *start, u64 *end)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3893
	struct btrfs_subpage_info *spi = fs_info->subpage_info;
3894 3895 3896
	u64 orig_start = *start;
	/* Declare as unsigned long so we can use bitmap ops */
	unsigned long flags;
3897
	int range_start_bit;
3898 3899 3900 3901 3902 3903
	int range_end_bit;

	/*
	 * For regular sector size == page size case, since one page only
	 * contains one sector, we return the page offset directly.
	 */
3904
	if (!btrfs_is_subpage(fs_info, page)) {
3905 3906 3907 3908 3909
		*start = page_offset(page);
		*end = page_offset(page) + PAGE_SIZE;
		return;
	}

3910 3911 3912
	range_start_bit = spi->dirty_offset +
			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);

3913 3914
	/* We should have the page locked, but just in case */
	spin_lock_irqsave(&subpage->lock, flags);
3915 3916
	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
			       spi->dirty_offset + spi->bitmap_nr_bits);
3917 3918
	spin_unlock_irqrestore(&subpage->lock, flags);

3919 3920 3921
	range_start_bit -= spi->dirty_offset;
	range_end_bit -= spi->dirty_offset;

3922 3923 3924 3925
	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
}

3926 3927 3928 3929 3930 3931 3932 3933
/*
 * helper for __extent_writepage.  This calls the writepage start hooks,
 * and does the loop to map the page into extents and bios.
 *
 * We return 1 if the IO is started and the page is unlocked,
 * 0 if all went well (page still locked)
 * < 0 if there were errors (page still locked)
 */
3934
static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3935 3936 3937 3938
				 struct page *page,
				 struct writeback_control *wbc,
				 struct extent_page_data *epd,
				 loff_t i_size,
3939
				 int *nr_ret)
3940
{
3941
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3942 3943
	u64 cur = page_offset(page);
	u64 end = cur + PAGE_SIZE - 1;
3944 3945 3946
	u64 extent_offset;
	u64 block_start;
	struct extent_map *em;
3947
	int saved_ret = 0;
3948 3949
	int ret = 0;
	int nr = 0;
3950
	u32 opf = REQ_OP_WRITE;
3951
	const unsigned int write_flags = wbc_to_write_flags(wbc);
3952
	bool has_error = false;
3953
	bool compressed;
C
Chris Mason 已提交
3954

3955
	ret = btrfs_writepage_cow_fixup(page);
3956 3957
	if (ret) {
		/* Fixup worker will requeue */
3958
		redirty_page_for_writepage(wbc, page);
3959 3960
		unlock_page(page);
		return 1;
3961 3962
	}

3963 3964 3965 3966
	/*
	 * we don't want to touch the inode after unlocking the page,
	 * so we update the mapping writeback index now
	 */
3967
	wbc->nr_to_write--;
3968

3969
	while (cur <= end) {
3970
		u64 disk_bytenr;
3971
		u64 em_end;
3972 3973
		u64 dirty_range_start = cur;
		u64 dirty_range_end;
3974
		u32 iosize;
3975

3976
		if (cur >= i_size) {
3977
			btrfs_writepage_endio_finish_ordered(inode, page, cur,
3978
							     end, true);
3979 3980 3981 3982 3983 3984 3985 3986 3987
			/*
			 * This range is beyond i_size, thus we don't need to
			 * bother writing back.
			 * But we still need to clear the dirty subpage bit, or
			 * the next time the page gets dirtied, we will try to
			 * writeback the sectors with subpage dirty bits,
			 * causing writeback without ordered extent.
			 */
			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
3988 3989
			break;
		}
3990 3991 3992 3993 3994 3995 3996 3997

		find_next_dirty_byte(fs_info, page, &dirty_range_start,
				     &dirty_range_end);
		if (cur < dirty_range_start) {
			cur = dirty_range_start;
			continue;
		}

3998
		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3999
		if (IS_ERR(em)) {
4000
			btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
4001
			ret = PTR_ERR_OR_ZERO(em);
4002 4003 4004
			has_error = true;
			if (!saved_ret)
				saved_ret = ret;
4005 4006 4007 4008
			break;
		}

		extent_offset = cur - em->start;
4009
		em_end = extent_map_end(em);
4010 4011 4012 4013
		ASSERT(cur <= em_end);
		ASSERT(cur < end);
		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
4014
		block_start = em->block_start;
C
Chris Mason 已提交
4015
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4016 4017
		disk_bytenr = em->block_start + extent_offset;

4018 4019 4020 4021 4022
		/*
		 * Note that em_end from extent_map_end() and dirty_range_end from
		 * find_next_dirty_byte() are all exclusive
		 */
		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
4023

4024
		if (btrfs_use_zone_append(inode, em->block_start))
4025 4026
			opf = REQ_OP_ZONE_APPEND;

4027 4028 4029
		free_extent_map(em);
		em = NULL;

C
Chris Mason 已提交
4030 4031 4032 4033 4034
		/*
		 * compressed and inline extents are written through other
		 * paths in the FS
		 */
		if (compressed || block_start == EXTENT_MAP_HOLE ||
4035
		    block_start == EXTENT_MAP_INLINE) {
4036
			if (compressed)
C
Chris Mason 已提交
4037
				nr++;
4038
			else
4039
				btrfs_writepage_endio_finish_ordered(inode,
4040
						page, cur, cur + iosize - 1, true);
4041
			btrfs_page_clear_dirty(fs_info, page, cur, iosize);
C
Chris Mason 已提交
4042
			cur += iosize;
4043 4044
			continue;
		}
C
Chris Mason 已提交
4045

4046
		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
4047
		if (!PageWriteback(page)) {
4048
			btrfs_err(inode->root->fs_info,
4049 4050
				   "page %lu not writeback, cur %llu end %llu",
			       page->index, cur, end);
4051
		}
4052

4053 4054 4055 4056 4057 4058 4059 4060
		/*
		 * Although the PageDirty bit is cleared before entering this
		 * function, subpage dirty bit is not cleared.
		 * So clear subpage dirty bit here so next time we won't submit
		 * page for range already written to disk.
		 */
		btrfs_page_clear_dirty(fs_info, page, cur, iosize);

4061 4062
		ret = submit_extent_page(opf | write_flags, wbc,
					 &epd->bio_ctrl, page,
4063
					 disk_bytenr, iosize,
4064
					 cur - page_offset(page),
4065
					 end_bio_extent_writepage,
4066
					 0, 0, false);
4067
		if (ret) {
4068 4069 4070 4071
			has_error = true;
			if (!saved_ret)
				saved_ret = ret;

4072
			btrfs_page_set_error(fs_info, page, cur, iosize);
4073
			if (PageWriteback(page))
4074 4075
				btrfs_page_clear_writeback(fs_info, page, cur,
							   iosize);
4076
		}
4077

4078
		cur += iosize;
4079 4080
		nr++;
	}
4081 4082 4083 4084
	/*
	 * If we finish without problem, we should not only clear page dirty,
	 * but also empty subpage dirty bits
	 */
4085
	if (!has_error)
4086
		btrfs_page_assert_not_dirty(fs_info, page);
4087 4088
	else
		ret = saved_ret;
4089 4090 4091 4092 4093 4094 4095 4096 4097
	*nr_ret = nr;
	return ret;
}

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
4098 4099 4100
 *
 * Return 0 if everything goes well.
 * Return <0 for error.
4101 4102
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
4103
			      struct extent_page_data *epd)
4104
{
4105
	struct folio *folio = page_folio(page);
4106
	struct inode *inode = page->mapping->host;
4107
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4108 4109
	const u64 page_start = page_offset(page);
	const u64 page_end = page_start + PAGE_SIZE - 1;
4110 4111
	int ret;
	int nr = 0;
4112
	size_t pg_offset;
4113
	loff_t i_size = i_size_read(inode);
4114
	unsigned long end_index = i_size >> PAGE_SHIFT;
4115 4116 4117 4118 4119

	trace___extent_writepage(page, inode, wbc);

	WARN_ON(!PageLocked(page));

4120 4121
	btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
			       page_offset(page), PAGE_SIZE);
4122

4123
	pg_offset = offset_in_page(i_size);
4124 4125
	if (page->index > end_index ||
	   (page->index == end_index && !pg_offset)) {
4126 4127
		folio_invalidate(folio, 0, folio_size(folio));
		folio_unlock(folio);
4128 4129 4130
		return 0;
	}

4131
	if (page->index == end_index)
4132
		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
4133

4134 4135 4136 4137 4138
	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		SetPageError(page);
		goto done;
	}
4139

4140
	if (!epd->extent_locked) {
4141
		ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
4142
		if (ret == 1)
4143
			return 0;
4144 4145 4146
		if (ret)
			goto done;
	}
4147

4148
	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
4149
				    &nr);
4150
	if (ret == 1)
4151
		return 0;
4152

4153 4154 4155 4156 4157 4158
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190
	/*
	 * Here we used to have a check for PageError() and then set @ret and
	 * call end_extent_writepage().
	 *
	 * But in fact setting @ret here will cause different error paths
	 * between subpage and regular sectorsize.
	 *
	 * For regular page size, we never submit current page, but only add
	 * current page to current bio.
	 * The bio submission can only happen in next page.
	 * Thus if we hit the PageError() branch, @ret is already set to
	 * non-zero value and will not get updated for regular sectorsize.
	 *
	 * But for subpage case, it's possible we submit part of current page,
	 * thus can get PageError() set by submitted bio of the same page,
	 * while our @ret is still 0.
	 *
	 * So here we unify the behavior and don't set @ret.
	 * Error can still be properly passed to higher layer as page will
	 * be set error, here we just don't handle the IO failure.
	 *
	 * NOTE: This is just a hotfix for subpage.
	 * The root fix will be properly ending ordered extent when we hit
	 * an error during writeback.
	 *
	 * But that needs a bigger refactoring, as we not only need to grab the
	 * submitted OE, but also need to know exactly at which bytenr we hit
	 * the error.
	 * Currently the full page based __extent_writepage_io() is not
	 * capable of that.
	 */
	if (PageError(page))
4191
		end_extent_writepage(page, ret, page_start, page_end);
4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204
	if (epd->extent_locked) {
		/*
		 * If epd->extent_locked, it's from extent_write_locked_range(),
		 * the page can either be locked by lock_page() or
		 * process_one_page().
		 * Let btrfs_page_unlock_writer() handle both cases.
		 */
		ASSERT(wbc);
		btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
					 wbc->range_end + 1 - wbc->range_start);
	} else {
		unlock_page(page);
	}
4205
	ASSERT(ret <= 0);
4206
	return ret;
4207 4208
}

4209
void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
4210
{
4211 4212
	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
		       TASK_UNINTERRUPTIBLE);
4213 4214
}

4215 4216 4217 4218 4219 4220 4221
static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
	smp_mb__after_atomic();
	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}

4222
/*
4223
 * Lock extent buffer status and pages for writeback.
4224
 *
4225 4226 4227 4228 4229 4230
 * May try to flush write bio if we can't get the lock.
 *
 * Return  0 if the extent buffer doesn't need to be submitted.
 *           (E.g. the extent buffer is not dirty)
 * Return >0 is the extent buffer is submitted to bio.
 * Return <0 if something went wrong, no page is locked.
4231
 */
4232
static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
4233
			  struct extent_page_data *epd)
4234
{
4235
	struct btrfs_fs_info *fs_info = eb->fs_info;
4236
	int i, num_pages;
4237 4238 4239 4240
	int flush = 0;
	int ret = 0;

	if (!btrfs_try_tree_write_lock(eb)) {
4241
		submit_write_bio(epd, 0);
4242
		flush = 1;
4243 4244 4245 4246 4247 4248 4249 4250
		btrfs_tree_lock(eb);
	}

	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
		btrfs_tree_unlock(eb);
		if (!epd->sync_io)
			return 0;
		if (!flush) {
4251
			submit_write_bio(epd, 0);
4252 4253
			flush = 1;
		}
C
Chris Mason 已提交
4254 4255 4256 4257 4258
		while (1) {
			wait_on_extent_buffer_writeback(eb);
			btrfs_tree_lock(eb);
			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
				break;
4259 4260 4261 4262
			btrfs_tree_unlock(eb);
		}
	}

4263 4264 4265 4266 4267 4268
	/*
	 * We need to do this to prevent races in people who check if the eb is
	 * under IO since we can end up having no IO bits set for a short period
	 * of time.
	 */
	spin_lock(&eb->refs_lock);
4269 4270
	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4271
		spin_unlock(&eb->refs_lock);
4272
		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4273 4274 4275
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 -eb->len,
					 fs_info->dirty_metadata_batch);
4276
		ret = 1;
4277 4278
	} else {
		spin_unlock(&eb->refs_lock);
4279 4280 4281 4282
	}

	btrfs_tree_unlock(eb);

4283 4284 4285 4286 4287 4288
	/*
	 * Either we don't need to submit any tree block, or we're submitting
	 * subpage eb.
	 * Subpage metadata doesn't use page locking at all, so we can skip
	 * the page locking.
	 */
4289
	if (!ret || fs_info->nodesize < PAGE_SIZE)
4290 4291
		return ret;

4292
	num_pages = num_extent_pages(eb);
4293
	for (i = 0; i < num_pages; i++) {
4294
		struct page *p = eb->pages[i];
4295 4296 4297

		if (!trylock_page(p)) {
			if (!flush) {
4298
				submit_write_bio(epd, 0);
4299 4300 4301 4302 4303 4304
				flush = 1;
			}
			lock_page(p);
		}
	}

4305
	return ret;
4306 4307
}

4308
static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
4309
{
4310
	struct btrfs_fs_info *fs_info = eb->fs_info;
4311

4312
	btrfs_page_set_error(fs_info, page, eb->start, eb->len);
4313 4314 4315
	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
		return;

4316 4317 4318 4319 4320 4321
	/*
	 * A read may stumble upon this buffer later, make sure that it gets an
	 * error and knows there was an error.
	 */
	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

4322 4323 4324 4325 4326 4327 4328 4329
	/*
	 * We need to set the mapping with the io error as well because a write
	 * error will flip the file system readonly, and then syncfs() will
	 * return a 0 because we are readonly if we don't modify the err seq for
	 * the superblock.
	 */
	mapping_set_error(page->mapping, -EIO);

4330 4331 4332 4333 4334 4335 4336
	/*
	 * If we error out, we should add back the dirty_metadata_bytes
	 * to make it consistent.
	 */
	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
				 eb->len, fs_info->dirty_metadata_batch);

4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376
	/*
	 * If writeback for a btree extent that doesn't belong to a log tree
	 * failed, increment the counter transaction->eb_write_errors.
	 * We do this because while the transaction is running and before it's
	 * committing (when we call filemap_fdata[write|wait]_range against
	 * the btree inode), we might have
	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
	 * returns an error or an error happens during writeback, when we're
	 * committing the transaction we wouldn't know about it, since the pages
	 * can be no longer dirty nor marked anymore for writeback (if a
	 * subsequent modification to the extent buffer didn't happen before the
	 * transaction commit), which makes filemap_fdata[write|wait]_range not
	 * able to find the pages tagged with SetPageError at transaction
	 * commit time. So if this happens we must abort the transaction,
	 * otherwise we commit a super block with btree roots that point to
	 * btree nodes/leafs whose content on disk is invalid - either garbage
	 * or the content of some node/leaf from a past generation that got
	 * cowed or deleted and is no longer valid.
	 *
	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
	 * not be enough - we need to distinguish between log tree extents vs
	 * non-log tree extents, and the next filemap_fdatawait_range() call
	 * will catch and clear such errors in the mapping - and that call might
	 * be from a log sync and not from a transaction commit. Also, checking
	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
	 * not done and would not be reliable - the eb might have been released
	 * from memory and reading it back again means that flag would not be
	 * set (since it's a runtime flag, not persisted on disk).
	 *
	 * Using the flags below in the btree inode also makes us achieve the
	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
	 * writeback for all dirty pages and before filemap_fdatawait_range()
	 * is called, the writeback for all dirty pages had already finished
	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
	 * filemap_fdatawait_range() would return success, as it could not know
	 * that writeback errors happened (the pages were no longer tagged for
	 * writeback).
	 */
	switch (eb->log_index) {
	case -1:
4377
		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
4378 4379
		break;
	case 0:
4380
		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
4381 4382
		break;
	case 1:
4383
		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
4384 4385 4386 4387 4388 4389
		break;
	default:
		BUG(); /* unexpected, logic error */
	}
}

4390 4391 4392 4393 4394 4395 4396 4397 4398 4399
/*
 * The endio specific version which won't touch any unsafe spinlock in endio
 * context.
 */
static struct extent_buffer *find_extent_buffer_nolock(
		struct btrfs_fs_info *fs_info, u64 start)
{
	struct extent_buffer *eb;

	rcu_read_lock();
4400 4401
	eb = radix_tree_lookup(&fs_info->buffer_radix,
			       start >> fs_info->sectorsize_bits);
4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		return eb;
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * The endio function for subpage extent buffer write.
 *
 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
 * after all extent buffers in the page has finished their writeback.
 */
4416
static void end_bio_subpage_eb_writepage(struct bio *bio)
4417
{
4418
	struct btrfs_fs_info *fs_info;
4419 4420 4421
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

4422
	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4423
	ASSERT(fs_info->nodesize < PAGE_SIZE);
4424

4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472
	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		u64 bvec_start = page_offset(page) + bvec->bv_offset;
		u64 bvec_end = bvec_start + bvec->bv_len - 1;
		u64 cur_bytenr = bvec_start;

		ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));

		/* Iterate through all extent buffers in the range */
		while (cur_bytenr <= bvec_end) {
			struct extent_buffer *eb;
			int done;

			/*
			 * Here we can't use find_extent_buffer(), as it may
			 * try to lock eb->refs_lock, which is not safe in endio
			 * context.
			 */
			eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
			ASSERT(eb);

			cur_bytenr = eb->start + eb->len;

			ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
			done = atomic_dec_and_test(&eb->io_pages);
			ASSERT(done);

			if (bio->bi_status ||
			    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
				ClearPageUptodate(page);
				set_btree_ioerr(page, eb);
			}

			btrfs_subpage_clear_writeback(fs_info, page, eb->start,
						      eb->len);
			end_extent_buffer_writeback(eb);
			/*
			 * free_extent_buffer() will grab spinlock which is not
			 * safe in endio context. Thus here we manually dec
			 * the ref.
			 */
			atomic_dec(&eb->refs);
		}
	}
	bio_put(bio);
}

4473
static void end_bio_extent_buffer_writepage(struct bio *bio)
4474
{
4475
	struct bio_vec *bvec;
4476
	struct extent_buffer *eb;
4477
	int done;
4478
	struct bvec_iter_all iter_all;
4479

4480
	ASSERT(!bio_flagged(bio, BIO_CLONED));
4481
	bio_for_each_segment_all(bvec, bio, iter_all) {
4482 4483 4484 4485 4486 4487
		struct page *page = bvec->bv_page;

		eb = (struct extent_buffer *)page->private;
		BUG_ON(!eb);
		done = atomic_dec_and_test(&eb->io_pages);

4488
		if (bio->bi_status ||
4489
		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4490
			ClearPageUptodate(page);
4491
			set_btree_ioerr(page, eb);
4492 4493 4494 4495 4496 4497 4498 4499
		}

		end_page_writeback(page);

		if (!done)
			continue;

		end_extent_buffer_writeback(eb);
4500
	}
4501 4502 4503 4504

	bio_put(bio);
}

4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529
static void prepare_eb_write(struct extent_buffer *eb)
{
	u32 nritems;
	unsigned long start;
	unsigned long end;

	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
	atomic_set(&eb->io_pages, num_extent_pages(eb));

	/* Set btree blocks beyond nritems with 0 to avoid stale content */
	nritems = btrfs_header_nritems(eb);
	if (btrfs_header_level(eb) > 0) {
		end = btrfs_node_key_ptr_offset(nritems);
		memzero_extent_buffer(eb, end, eb->len - end);
	} else {
		/*
		 * Leaf:
		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
		 */
		start = btrfs_item_nr_offset(nritems);
		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
		memzero_extent_buffer(eb, start, end - start);
	}
}

4530 4531 4532 4533 4534 4535 4536 4537 4538 4539
/*
 * Unlike the work in write_one_eb(), we rely completely on extent locking.
 * Page locking is only utilized at minimum to keep the VMM code happy.
 */
static int write_one_subpage_eb(struct extent_buffer *eb,
				struct writeback_control *wbc,
				struct extent_page_data *epd)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct page *page = eb->pages[0];
4540
	unsigned int write_flags = wbc_to_write_flags(wbc);
4541 4542 4543
	bool no_dirty_ebs = false;
	int ret;

4544 4545
	prepare_eb_write(eb);

4546 4547 4548 4549 4550 4551 4552 4553 4554 4555
	/* clear_page_dirty_for_io() in subpage helper needs page locked */
	lock_page(page);
	btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);

	/* Check if this is the last dirty bit to update nr_written */
	no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
							  eb->start, eb->len);
	if (no_dirty_ebs)
		clear_page_dirty_for_io(page);

4556 4557 4558
	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
			&epd->bio_ctrl, page, eb->start, eb->len,
			eb->start - page_offset(page),
4559
			end_bio_subpage_eb_writepage, 0, 0, false);
4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574
	if (ret) {
		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
		set_btree_ioerr(page, eb);
		unlock_page(page);

		if (atomic_dec_and_test(&eb->io_pages))
			end_extent_buffer_writeback(eb);
		return -EIO;
	}
	unlock_page(page);
	/*
	 * Submission finished without problem, if no range of the page is
	 * dirty anymore, we have submitted a page.  Update nr_written in wbc.
	 */
	if (no_dirty_ebs)
4575
		wbc->nr_to_write--;
4576 4577 4578
	return ret;
}

4579
static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
4580 4581 4582
			struct writeback_control *wbc,
			struct extent_page_data *epd)
{
4583
	u64 disk_bytenr = eb->start;
4584
	int i, num_pages;
4585
	unsigned int write_flags = wbc_to_write_flags(wbc);
4586
	int ret = 0;
4587

4588
	prepare_eb_write(eb);
4589

4590
	num_pages = num_extent_pages(eb);
4591
	for (i = 0; i < num_pages; i++) {
4592
		struct page *p = eb->pages[i];
4593 4594 4595

		clear_page_dirty_for_io(p);
		set_page_writeback(p);
4596
		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4597 4598
					 &epd->bio_ctrl, p, disk_bytenr,
					 PAGE_SIZE, 0,
4599
					 end_bio_extent_buffer_writepage,
4600
					 0, 0, false);
4601
		if (ret) {
4602
			set_btree_ioerr(p, eb);
4603 4604
			if (PageWriteback(p))
				end_page_writeback(p);
4605 4606 4607 4608 4609
			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
				end_extent_buffer_writeback(eb);
			ret = -EIO;
			break;
		}
4610
		disk_bytenr += PAGE_SIZE;
4611
		wbc->nr_to_write--;
4612 4613 4614 4615 4616
		unlock_page(p);
	}

	if (unlikely(ret)) {
		for (; i < num_pages; i++) {
4617
			struct page *p = eb->pages[i];
4618
			clear_page_dirty_for_io(p);
4619 4620 4621 4622 4623 4624 4625
			unlock_page(p);
		}
	}

	return ret;
}

4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651
/*
 * Submit one subpage btree page.
 *
 * The main difference to submit_eb_page() is:
 * - Page locking
 *   For subpage, we don't rely on page locking at all.
 *
 * - Flush write bio
 *   We only flush bio if we may be unable to fit current extent buffers into
 *   current bio.
 *
 * Return >=0 for the number of submitted extent buffers.
 * Return <0 for fatal error.
 */
static int submit_eb_subpage(struct page *page,
			     struct writeback_control *wbc,
			     struct extent_page_data *epd)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	int submitted = 0;
	u64 page_start = page_offset(page);
	int bit_start = 0;
	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
	int ret;

	/* Lock and write each dirty extent buffers in the range */
4652
	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667
		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
		struct extent_buffer *eb;
		unsigned long flags;
		u64 start;

		/*
		 * Take private lock to ensure the subpage won't be detached
		 * in the meantime.
		 */
		spin_lock(&page->mapping->private_lock);
		if (!PagePrivate(page)) {
			spin_unlock(&page->mapping->private_lock);
			break;
		}
		spin_lock_irqsave(&subpage->lock, flags);
4668 4669
		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
			      subpage->bitmaps)) {
4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703
			spin_unlock_irqrestore(&subpage->lock, flags);
			spin_unlock(&page->mapping->private_lock);
			bit_start++;
			continue;
		}

		start = page_start + bit_start * fs_info->sectorsize;
		bit_start += sectors_per_node;

		/*
		 * Here we just want to grab the eb without touching extra
		 * spin locks, so call find_extent_buffer_nolock().
		 */
		eb = find_extent_buffer_nolock(fs_info, start);
		spin_unlock_irqrestore(&subpage->lock, flags);
		spin_unlock(&page->mapping->private_lock);

		/*
		 * The eb has already reached 0 refs thus find_extent_buffer()
		 * doesn't return it. We don't need to write back such eb
		 * anyway.
		 */
		if (!eb)
			continue;

		ret = lock_extent_buffer_for_io(eb, epd);
		if (ret == 0) {
			free_extent_buffer(eb);
			continue;
		}
		if (ret < 0) {
			free_extent_buffer(eb);
			goto cleanup;
		}
4704
		ret = write_one_subpage_eb(eb, wbc, epd);
4705 4706 4707 4708 4709 4710 4711 4712 4713
		free_extent_buffer(eb);
		if (ret < 0)
			goto cleanup;
		submitted++;
	}
	return submitted;

cleanup:
	/* We hit error, end bio for the submitted extent buffers */
4714
	submit_write_bio(epd, ret);
4715 4716 4717
	return ret;
}

4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742
/*
 * Submit all page(s) of one extent buffer.
 *
 * @page:	the page of one extent buffer
 * @eb_context:	to determine if we need to submit this page, if current page
 *		belongs to this eb, we don't need to submit
 *
 * The caller should pass each page in their bytenr order, and here we use
 * @eb_context to determine if we have submitted pages of one extent buffer.
 *
 * If we have, we just skip until we hit a new page that doesn't belong to
 * current @eb_context.
 *
 * If not, we submit all the page(s) of the extent buffer.
 *
 * Return >0 if we have submitted the extent buffer successfully.
 * Return 0 if we don't need to submit the page, as it's already submitted by
 * previous call.
 * Return <0 for fatal error.
 */
static int submit_eb_page(struct page *page, struct writeback_control *wbc,
			  struct extent_page_data *epd,
			  struct extent_buffer **eb_context)
{
	struct address_space *mapping = page->mapping;
4743
	struct btrfs_block_group *cache = NULL;
4744 4745 4746 4747 4748 4749
	struct extent_buffer *eb;
	int ret;

	if (!PagePrivate(page))
		return 0;

4750
	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
4751 4752
		return submit_eb_subpage(page, wbc, epd);

4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778
	spin_lock(&mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&mapping->private_lock);
		return 0;
	}

	eb = (struct extent_buffer *)page->private;

	/*
	 * Shouldn't happen and normally this would be a BUG_ON but no point
	 * crashing the machine for something we can survive anyway.
	 */
	if (WARN_ON(!eb)) {
		spin_unlock(&mapping->private_lock);
		return 0;
	}

	if (eb == *eb_context) {
		spin_unlock(&mapping->private_lock);
		return 0;
	}
	ret = atomic_inc_not_zero(&eb->refs);
	spin_unlock(&mapping->private_lock);
	if (!ret)
		return 0;

4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791
	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
		/*
		 * If for_sync, this hole will be filled with
		 * trasnsaction commit.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			ret = -EAGAIN;
		else
			ret = 0;
		free_extent_buffer(eb);
		return ret;
	}

4792 4793 4794 4795
	*eb_context = eb;

	ret = lock_extent_buffer_for_io(eb, epd);
	if (ret <= 0) {
4796 4797 4798
		btrfs_revert_meta_write_pointer(cache, eb);
		if (cache)
			btrfs_put_block_group(cache);
4799 4800 4801
		free_extent_buffer(eb);
		return ret;
	}
4802
	if (cache) {
4803 4804 4805
		/*
		 * Implies write in zoned mode. Mark the last eb in a block group.
		 */
4806
		btrfs_schedule_zone_finish_bg(cache, eb);
4807
		btrfs_put_block_group(cache);
4808
	}
4809 4810 4811 4812 4813 4814 4815
	ret = write_one_eb(eb, wbc, epd);
	free_extent_buffer(eb);
	if (ret < 0)
		return ret;
	return 1;
}

4816 4817 4818
int btree_write_cache_pages(struct address_space *mapping,
				   struct writeback_control *wbc)
{
4819
	struct extent_buffer *eb_context = NULL;
4820
	struct extent_page_data epd = {
4821
		.bio_ctrl = { 0 },
4822 4823 4824
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
	};
4825
	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
4826 4827 4828 4829 4830 4831 4832 4833
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
M
Matthew Wilcox 已提交
4834
	xa_mark_t tag;
4835

4836
	pagevec_init(&pvec);
4837 4838 4839
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
4840 4841 4842 4843 4844
		/*
		 * Start from the beginning does not need to cycle over the
		 * range, mark it as scanned.
		 */
		scanned = (index == 0);
4845
	} else {
4846 4847
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
4848 4849 4850 4851 4852 4853
		scanned = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
4854
	btrfs_zoned_meta_io_lock(fs_info);
4855 4856 4857 4858
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	while (!done && !nr_to_write_done && (index <= end) &&
J
Jan Kara 已提交
4859
	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4860
			tag))) {
4861 4862 4863 4864 4865
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

4866 4867
			ret = submit_eb_page(page, wbc, &epd, &eb_context);
			if (ret == 0)
4868
				continue;
4869
			if (ret < 0) {
4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892
				done = 1;
				break;
			}

			/*
			 * the filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918
	/*
	 * If something went wrong, don't allow any metadata write bio to be
	 * submitted.
	 *
	 * This would prevent use-after-free if we had dirty pages not
	 * cleaned up, which can still happen by fuzzed images.
	 *
	 * - Bad extent tree
	 *   Allowing existing tree block to be allocated for other trees.
	 *
	 * - Log tree operations
	 *   Exiting tree blocks get allocated to log tree, bumps its
	 *   generation, then get cleaned in tree re-balance.
	 *   Such tree block will not be written back, since it's clean,
	 *   thus no WRITTEN flag set.
	 *   And after log writes back, this tree block is not traced by
	 *   any dirty extent_io_tree.
	 *
	 * - Offending tree block gets re-dirtied from its original owner
	 *   Since it has bumped generation, no WRITTEN flag, it can be
	 *   reused without COWing. This tree block will not be traced
	 *   by btrfs_transaction::dirty_pages.
	 *
	 *   Now such dirty tree block will not be cleaned by any dirty
	 *   extent io tree. Thus we don't want to submit such wild eb
	 *   if the fs already has error.
4919
	 *
4920 4921 4922 4923 4924
	 * We can get ret > 0 from submit_extent_page() indicating how many ebs
	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
	 */
	if (ret > 0)
		ret = 0;
4925 4926 4927 4928 4929
	if (!ret && BTRFS_FS_ERROR(fs_info))
		ret = -EROFS;
	submit_write_bio(&epd, ret);

	btrfs_zoned_meta_io_unlock(fs_info);
4930 4931 4932
	return ret;
}

4933
/**
4934 4935
 * Walk the list of dirty pages of the given address space and write all of them.
 *
4936
 * @mapping: address space structure to write
4937 4938
 * @wbc:     subtract the number of written pages from *@wbc->nr_to_write
 * @epd:     holds context for the write, namely the bio
4939 4940 4941 4942 4943 4944 4945 4946 4947
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
4948
static int extent_write_cache_pages(struct address_space *mapping,
C
Chris Mason 已提交
4949
			     struct writeback_control *wbc,
4950
			     struct extent_page_data *epd)
4951
{
4952
	struct inode *inode = mapping->host;
4953 4954
	int ret = 0;
	int done = 0;
4955
	int nr_to_write_done = 0;
4956 4957 4958 4959
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
4960 4961
	pgoff_t done_index;
	int range_whole = 0;
4962
	int scanned = 0;
M
Matthew Wilcox 已提交
4963
	xa_mark_t tag;
4964

4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976
	/*
	 * We have to hold onto the inode so that ordered extents can do their
	 * work when the IO finishes.  The alternative to this is failing to add
	 * an ordered extent if the igrab() fails there and that is a huge pain
	 * to deal with, so instead just hold onto the inode throughout the
	 * writepages operation.  If it fails here we are freeing up the inode
	 * anyway and we'd rather not waste our time writing out stuff that is
	 * going to be truncated anyway.
	 */
	if (!igrab(inode))
		return 0;

4977
	pagevec_init(&pvec);
4978 4979 4980
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
4981 4982 4983 4984 4985
		/*
		 * Start from the beginning does not need to cycle over the
		 * range, mark it as scanned.
		 */
		scanned = (index == 0);
4986
	} else {
4987 4988
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
4989 4990
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
4991 4992
		scanned = 1;
	}
4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006

	/*
	 * We do the tagged writepage as long as the snapshot flush bit is set
	 * and we are the first one who do the filemap_flush() on this inode.
	 *
	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
	 * not race in and drop the bit.
	 */
	if (range_whole && wbc->nr_to_write == LONG_MAX &&
	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
			       &BTRFS_I(inode)->runtime_flags))
		wbc->tagged_writepages = 1;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
5007 5008 5009
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
5010
retry:
5011
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
5012
		tag_pages_for_writeback(mapping, index, end);
5013
	done_index = index;
5014
	while (!done && !nr_to_write_done && (index <= end) &&
5015 5016
			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
						&index, end, tag))) {
5017 5018 5019 5020 5021
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

5022
			done_index = page->index + 1;
5023
			/*
M
Matthew Wilcox 已提交
5024 5025 5026 5027 5028
			 * At this point we hold neither the i_pages lock nor
			 * the page lock: the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to
			 * tmpfs file mapping
5029
			 */
5030
			if (!trylock_page(page)) {
5031
				submit_write_bio(epd, 0);
5032
				lock_page(page);
5033
			}
5034 5035 5036 5037 5038 5039

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

C
Chris Mason 已提交
5040
			if (wbc->sync_mode != WB_SYNC_NONE) {
5041
				if (PageWriteback(page))
5042
					submit_write_bio(epd, 0);
5043
				wait_on_page_writeback(page);
C
Chris Mason 已提交
5044
			}
5045 5046 5047 5048 5049 5050 5051

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

5052
			ret = __extent_writepage(page, wbc, epd);
5053 5054 5055 5056
			if (ret < 0) {
				done = 1;
				break;
			}
5057 5058 5059 5060 5061 5062 5063

			/*
			 * the filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
5064 5065 5066 5067
		}
		pagevec_release(&pvec);
		cond_resched();
	}
5068
	if (!scanned && !done) {
5069 5070 5071 5072 5073 5074
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
5075 5076 5077 5078 5079 5080 5081

		/*
		 * If we're looping we could run into a page that is locked by a
		 * writer and that writer could be waiting on writeback for a
		 * page in our current bio, and thus deadlock, so flush the
		 * write bio here.
		 */
5082
		submit_write_bio(epd, 0);
5083
		goto retry;
5084
	}
5085 5086 5087 5088

	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
		mapping->writeback_index = done_index;

5089
	btrfs_add_delayed_iput(inode);
5090
	return ret;
5091 5092
}

5093
int extent_write_full_page(struct page *page, struct writeback_control *wbc)
5094 5095 5096
{
	int ret;
	struct extent_page_data epd = {
5097
		.bio_ctrl = { 0 },
5098
		.extent_locked = 0,
5099
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5100 5101 5102
	};

	ret = __extent_writepage(page, wbc, &epd);
5103
	submit_write_bio(&epd, ret);
5104 5105 5106
	return ret;
}

5107 5108 5109 5110 5111 5112
/*
 * Submit the pages in the range to bio for call sites which delalloc range has
 * already been ran (aka, ordered extent inserted) and all pages are still
 * locked.
 */
int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
5113
{
5114 5115
	bool found_error = false;
	int first_error = 0;
5116 5117 5118
	int ret = 0;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
5119
	u64 cur = start;
5120 5121
	unsigned long nr_pages;
	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
5122
	struct extent_page_data epd = {
5123
		.bio_ctrl = { 0 },
5124
		.extent_locked = 1,
5125
		.sync_io = 1,
5126 5127
	};
	struct writeback_control wbc_writepages = {
5128
		.sync_mode	= WB_SYNC_ALL,
5129 5130
		.range_start	= start,
		.range_end	= end + 1,
5131 5132 5133
		/* We're called from an async helper function */
		.punt_to_cgroup	= 1,
		.no_cgroup_owner = 1,
5134 5135
	};

5136 5137 5138 5139 5140
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
	nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
		   PAGE_SHIFT;
	wbc_writepages.nr_to_write = nr_pages * 2;

5141
	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
5142
	while (cur <= end) {
5143 5144
		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);

5145 5146 5147 5148 5149 5150
		page = find_get_page(mapping, cur >> PAGE_SHIFT);
		/*
		 * All pages in the range are locked since
		 * btrfs_run_delalloc_range(), thus there is no way to clear
		 * the page dirty flag.
		 */
5151
		ASSERT(PageLocked(page));
5152 5153 5154 5155 5156 5157 5158
		ASSERT(PageDirty(page));
		clear_page_dirty_for_io(page);
		ret = __extent_writepage(page, &wbc_writepages, &epd);
		ASSERT(ret <= 0);
		if (ret < 0) {
			found_error = true;
			first_error = ret;
5159
		}
5160
		put_page(page);
5161
		cur = cur_end + 1;
5162 5163
	}

5164
	submit_write_bio(&epd, found_error ? ret : 0);
5165 5166

	wbc_detach_inode(&wbc_writepages);
5167 5168
	if (found_error)
		return first_error;
5169 5170
	return ret;
}
5171

5172
int extent_writepages(struct address_space *mapping,
5173 5174
		      struct writeback_control *wbc)
{
5175
	struct inode *inode = mapping->host;
5176 5177
	int ret = 0;
	struct extent_page_data epd = {
5178
		.bio_ctrl = { 0 },
5179
		.extent_locked = 0,
5180
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5181 5182
	};

5183 5184 5185 5186
	/*
	 * Allow only a single thread to do the reloc work in zoned mode to
	 * protect the write pointer updates.
	 */
5187
	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
5188
	ret = extent_write_cache_pages(mapping, wbc, &epd);
5189
	submit_write_bio(&epd, ret);
5190
	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
5191 5192 5193
	return ret;
}

void extent_readahead(struct readahead_control *rac)
{
	struct btrfs_bio_ctrl bio_ctrl = { 0 };
	struct page *pagepool[16];
	struct extent_map *em_cached = NULL;
	u64 prev_em_start = (u64)-1;
	int nr;

	while ((nr = readahead_page_batch(rac, pagepool))) {
		u64 contig_start = readahead_pos(rac);
		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;

		contiguous_readpages(pagepool, nr, contig_start, contig_end,
				&em_cached, &bio_ctrl, &prev_em_start);
	}

	if (em_cached)
		free_extent_map(em_cached);

	if (bio_ctrl.bio)
		submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.compress_type);
}

/*
5218 5219
 * Basic invalidate_folio code.  This waits on any locked or writeback
 * ranges corresponding to the folio, and then deletes any extent state
 * records from the tree.
 */
5222 5223
int extent_invalidate_folio(struct extent_io_tree *tree,
			  struct folio *folio, size_t offset)
5224
{
5225
	struct extent_state *cached_state = NULL;
5226 5227 5228
	u64 start = folio_pos(folio);
	u64 end = start + folio_size(folio) - 1;
	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
5229

5230 5231 5232
	/* This function is only called for the btree inode */
	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);

5233
	start += ALIGN(offset, blocksize);
5234 5235 5236
	if (start > end)
		return 0;

5237
	lock_extent_bits(tree, start, end, &cached_state);
5238
	folio_wait_writeback(folio);
5239 5240 5241 5242 5243 5244 5245

	/*
	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
	 * so here we only need to unlock the extent range to free any
	 * existing extent state.
	 */
	unlock_extent_cached(tree, start, end, &cached_state);
5246 5247 5248
	return 0;
}

5249
/*
5250
 * A helper for release_folio.  This tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
5254
static int try_release_extent_state(struct extent_io_tree *tree,
5255
				    struct page *page, gfp_t mask)
5256
{
	u64 start = page_offset(page);
5258
	u64 end = start + PAGE_SIZE - 1;
5259 5260
	int ret = 1;

	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
5262
		ret = 0;
	} else {
5264
		/*
5265 5266 5267 5268
		 * At this point we can safely clear everything except the
		 * locked bit, the nodatasum bit and the delalloc new bit.
		 * The delalloc new bit will be cleared by ordered extent
		 * completion.
5269
		 */
5270
		ret = __clear_extent_bit(tree, start, end,
5271 5272
			 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
			 0, 0, NULL, mask, NULL);
5273 5274 5275 5276 5277 5278 5279 5280

		/* if clear_extent_bit failed for enomem reasons,
		 * we can't allow the release to continue.
		 */
		if (ret < 0)
			ret = 0;
		else
			ret = 1;
5281 5282 5283 5284
	}
	return ret;
}

5285
/*
 * A helper for release_folio.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed.
 */
5290
int try_release_extent_mapping(struct page *page, gfp_t mask)
5291 5292
{
	struct extent_map *em;
	u64 start = page_offset(page);
5294
	u64 end = start + PAGE_SIZE - 1;
5295 5296 5297
	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
	struct extent_io_tree *tree = &btrfs_inode->io_tree;
	struct extent_map_tree *map = &btrfs_inode->extent_tree;
5298

5299
	if (gfpflags_allow_blocking(mask) &&
5300
	    page->mapping->host->i_size > SZ_16M) {
5301
		u64 len;
5302
		while (start <= end) {
5303 5304 5305
			struct btrfs_fs_info *fs_info;
			u64 cur_gen;

5306
			len = end - start + 1;
5307
			write_lock(&map->lock);
5308
			em = lookup_extent_mapping(map, start, len);
5309
			if (!em) {
5310
				write_unlock(&map->lock);
5311 5312
				break;
			}
5313 5314
			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
			    em->start != start) {
5315
				write_unlock(&map->lock);
5316 5317 5318
				free_extent_map(em);
				break;
			}
5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329
			if (test_range_bit(tree, em->start,
					   extent_map_end(em) - 1,
					   EXTENT_LOCKED, 0, NULL))
				goto next;
			/*
			 * If it's not in the list of modified extents, used
			 * by a fast fsync, we can remove it. If it's being
			 * logged we can safely remove it since fsync took an
			 * extra reference on the em.
			 */
			if (list_empty(&em->list) ||
5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345
			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
				goto remove_em;
			/*
			 * If it's in the list of modified extents, remove it
			 * only if its generation is older than the current one,
			 * in which case we don't need it for a fast fsync.
			 * Otherwise don't remove it, we could be racing with an
			 * ongoing fast fsync that could miss the new extent.
			 */
			fs_info = btrfs_inode->root->fs_info;
			spin_lock(&fs_info->trans_lock);
			cur_gen = fs_info->generation;
			spin_unlock(&fs_info->trans_lock);
			if (em->generation >= cur_gen)
				goto next;
remove_em:
5346 5347 5348 5349 5350 5351 5352 5353
			/*
			 * We only remove extent maps that are not in the list of
			 * modified extents or that are in the list but with a
			 * generation lower than the current generation, so there
			 * is no need to set the full fsync flag on the inode (it
			 * hurts the fsync performance for workloads with a data
			 * size that exceeds or is close to the system's memory).
			 */
5354 5355 5356
			remove_extent_mapping(map, em);
			/* once for the rb tree */
			free_extent_map(em);
5357
next:
5358
			start = extent_map_end(em);
5359
			write_unlock(&map->lock);
5360 5361

			/* once for us */
5362
			free_extent_map(em);
5363 5364

			cond_resched(); /* Allow large-extent preemption. */
5365 5366
		}
	}
5367
	return try_release_extent_state(tree, page, mask);
5368 5369
}

5370 5371 5372 5373
/*
 * Helper function for fiemap, which doesn't want to see any holes.
 * This maps until we find something past 'last'.
 */
5374
static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
5375
						u64 offset, u64 last)
5376
{
5377
	u64 sectorsize = btrfs_inode_sectorsize(inode);
5378 5379 5380 5381 5382 5383
	struct extent_map *em;
	u64 len;

	if (offset >= last)
		return NULL;

5384
	while (1) {
5385 5386 5387
		len = last - offset;
		if (len == 0)
			break;
5388
		len = ALIGN(len, sectorsize);
5389
		em = btrfs_get_extent_fiemap(inode, offset, len);
5390
		if (IS_ERR(em))
5391 5392 5393
			return em;

		/* if this isn't a hole return it */
5394
		if (em->block_start != EXTENT_MAP_HOLE)
5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405
			return em;

		/* this is a hole, advance to the next extent */
		offset = extent_map_end(em);
		free_extent_map(em);
		if (offset >= last)
			break;
	}
	return NULL;
}

5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439
/*
 * To cache previous fiemap extent
 *
 * Will be used for merging fiemap extent
 */
struct fiemap_cache {
	u64 offset;
	u64 phys;
	u64 len;
	u32 flags;
	bool cached;
};

/*
 * Helper to submit fiemap extent.
 *
 * Will try to merge the current fiemap extent, specified by @offset, @phys,
 * @len and @flags, with the cached one.
 * Only when the merge fails is the cached extent submitted as a
 * fiemap extent.
 *
 * Return value is the same as fiemap_fill_next_extent().
 */
static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
				struct fiemap_cache *cache,
				u64 offset, u64 phys, u64 len, u32 flags)
{
	int ret = 0;

	if (!cache->cached)
		goto assign;

	/*
	 * Sanity check, extent_fiemap() should have ensured that new
5440
	 * fiemap extent won't overlap with cached one.
5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491
	 * Not recoverable.
	 *
	 * NOTE: Physical address can overlap, due to compression
	 */
	if (cache->offset + cache->len > offset) {
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * Only merges fiemap extents if
	 * 1) Their logical addresses are continuous
	 *
	 * 2) Their physical addresses are continuous
	 *    So truly compressed (physical size smaller than logical size)
	 *    extents won't get merged with each other
	 *
	 * 3) Share same flags except FIEMAP_EXTENT_LAST
	 *    So regular extent won't get merged with prealloc extent
	 */
	if (cache->offset + cache->len  == offset &&
	    cache->phys + cache->len == phys  &&
	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
			(flags & ~FIEMAP_EXTENT_LAST)) {
		cache->len += len;
		cache->flags |= flags;
		goto try_submit_last;
	}

	/* Not mergeable, need to submit cached one */
	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
				      cache->len, cache->flags);
	cache->cached = false;
	if (ret)
		return ret;
assign:
	cache->cached = true;
	cache->offset = offset;
	cache->phys = phys;
	cache->len = len;
	cache->flags = flags;
try_submit_last:
	if (cache->flags & FIEMAP_EXTENT_LAST) {
		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
				cache->phys, cache->len, cache->flags);
		cache->cached = false;
	}
	return ret;
}
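/*
 * Editor's note, an illustrative walk-through of the merge rules above (a
 * sketch, not part of the original source): the calls
 *
 *	emit_fiemap_extent(fieinfo, &cache, 0,     SZ_1M,         SZ_4K, 0);
 *	emit_fiemap_extent(fieinfo, &cache, SZ_4K, SZ_1M + SZ_4K, SZ_4K, 0);
 *
 * satisfy all three conditions (contiguous logical, contiguous physical,
 * identical flags), so the cache grows into one 8K record and nothing is
 * handed to fiemap_fill_next_extent() until a non-mergeable extent or
 * FIEMAP_EXTENT_LAST arrives.
 */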

/*
5492
 * Emit last fiemap cache
5493
 *
5494 5495 5496 5497 5498 5499 5500
 * The last fiemap cache may still be cached in the following case:
 * 0		      4k		    8k
 * |<- Fiemap range ->|
 * |<------------  First extent ----------->|
 *
 * In this case, the first extent range will be cached but not emitted.
 * So we must emit it before ending extent_fiemap().
5501
 */
5502
static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
5503
				  struct fiemap_cache *cache)
5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517
{
	int ret;

	if (!cache->cached)
		return 0;

	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
				      cache->len, cache->flags);
	cache->cached = false;
	if (ret > 0)
		ret = 0;
	return ret;
}
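/*
 * Editor's note (descriptive, not from the original source): a positive
 * return from fiemap_fill_next_extent() only means the user-supplied extent
 * array is full, which is why it is folded into 0 (success) above.
 */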

int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
		  u64 start, u64 len)
{
	int ret = 0;
	u64 off;
	u64 max = start + len;
	u32 flags = 0;
	u32 found_type;
	u64 last;
	u64 last_for_get_extent = 0;
	u64 disko = 0;
	u64 isize = i_size_read(&inode->vfs_inode);
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct fiemap_cache cache = { 0 };
	struct ulist *roots;
	struct ulist *tmp_ulist;
	int end = 0;
	u64 em_start = 0;
	u64 em_len = 0;
	u64 em_end = 0;

	if (len == 0)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

5550 5551 5552 5553 5554 5555 5556
	roots = ulist_alloc(GFP_KERNEL);
	tmp_ulist = ulist_alloc(GFP_KERNEL);
	if (!roots || !tmp_ulist) {
		ret = -ENOMEM;
		goto out_free_ulist;
	}

5557 5558 5559 5560 5561
	/*
	 * We can't initialize that to 'start' as this could miss extents due
	 * to extent item merging
	 */
	off = 0;
5562 5563
	start = round_down(start, btrfs_inode_sectorsize(inode));
	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
5564

5565 5566 5567 5568
	/*
	 * lookup the last file extent.  We're not using i_size here
	 * because there might be preallocation past i_size
	 */
5569 5570
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
				       0);
	if (ret < 0) {
5572
		goto out_free_ulist;
5573 5574 5575 5576
	} else {
		WARN_ON(!ret);
		if (ret == 1)
			ret = 0;
	}
5578

	path->slots[0]--;
	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5581
	found_type = found_key.type;

5583
	/* No extents, but there might be delalloc bits */
5584
	if (found_key.objectid != btrfs_ino(inode) ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596
		/* have to trust i_size as the end */
		last = (u64)-1;
		last_for_get_extent = isize;
	} else {
		/*
		 * remember the start of the last extent.  There are a
		 * bunch of different factors that go into the length of the
		 * extent, so it's much less complex to remember where it started
		 */
		last = found_key.offset;
		last_for_get_extent = last + 1;
	}
5598
	btrfs_release_path(path);

5600 5601 5602 5603 5604 5605 5606 5607 5608 5609
	/*
	 * we might have some extents allocated but more delalloc past those
	 * extents.  so, we trust isize unless the start of the last extent is
	 * beyond isize
	 */
	if (last < isize) {
		last = (u64)-1;
		last_for_get_extent = isize;
	}

5610
	lock_extent_bits(&inode->io_tree, start, start + len - 1,
5611
			 &cached_state);
5612

5613
	em = get_extent_skip_holes(inode, start, last_for_get_extent);
	if (!em)
		goto out;
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	while (!end) {
5622
		u64 offset_in_extent = 0;
5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634

		/* break if the extent we found is outside the range */
		if (em->start >= max || extent_map_end(em) < off)
			break;

		/*
		 * get_extent may return an extent that starts before our
		 * requested range.  We have to make sure the ranges
		 * we return to fiemap always move forward and don't
		 * overlap, so adjust the offsets here
		 */
		em_start = max(em->start, off);

5636 5637
		/*
		 * Record the offset from the start of the extent
		 * for adjusting the disk offset below.  Only do this if the
		 * extent isn't compressed, since our in-memory offset may be past
		 * what we have actually allocated on disk.
		 */
5642 5643
		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			offset_in_extent = em_start - em->start;
5644
		em_end = extent_map_end(em);
5645
		em_len = em_end - em_start;
		flags = 0;
5647 5648 5649 5650
		if (em->block_start < EXTENT_MAP_LAST_BYTE)
			disko = em->block_start + offset_in_extent;
		else
			disko = 0;

5652 5653 5654 5655 5656 5657 5658
		/*
		 * bump off for our next call to get_extent
		 */
		off = extent_map_end(em);
		if (off >= max)
			end = 1;

5659
		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
			end = 1;
			flags |= FIEMAP_EXTENT_LAST;
5662
		} else if (em->block_start == EXTENT_MAP_INLINE) {
			flags |= (FIEMAP_EXTENT_DATA_INLINE |
				  FIEMAP_EXTENT_NOT_ALIGNED);
5665
		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
5668 5669 5670
		} else if (fieinfo->fi_extents_max) {
			u64 bytenr = em->block_start -
				(em->start - em->orig_start);
5671 5672 5673 5674

			/*
			 * As btrfs supports shared space, this information
			 * can be exported to userspace tools via
5675 5676 5677
			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
			 * then we're just getting a count and we can skip the
			 * lookup stuff.
5678
			 */
5679
			ret = btrfs_check_shared(root, btrfs_ino(inode),
5680
						 bytenr, roots, tmp_ulist);
5681
			if (ret < 0)
5682
				goto out_free;
5683
			if (ret)
5684
				flags |= FIEMAP_EXTENT_SHARED;
5685
			ret = 0;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			flags |= FIEMAP_EXTENT_ENCODED;
5689 5690
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			flags |= FIEMAP_EXTENT_UNWRITTEN;

		free_extent_map(em);
		em = NULL;
5694 5695
		if ((em_start >= last) || em_len == (u64)-1 ||
		   (last == (u64)-1 && isize <= em_end)) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}

5700
		/* now scan forward to see if this is really the last extent. */
5701
		em = get_extent_skip_holes(inode, off, last_for_get_extent);
5702 5703 5704 5705 5706
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		if (!em) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}
5710 5711
		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
					   em_len, flags);
5712 5713 5714
		if (ret) {
			if (ret == 1)
				ret = 0;
5715
			goto out_free;
5716
		}
	}
out_free:
5719
	if (!ret)
5720
		ret = emit_last_fiemap_cache(fieinfo, &cache);
	free_extent_map(em);
out:
5723
	unlock_extent_cached(&inode->io_tree, start, start + len - 1,
5724
			     &cached_state);
5725 5726

out_free_ulist:
5727
	btrfs_free_path(path);
5728 5729
	ulist_free(roots);
	ulist_free(tmp_ulist);
	return ret;
}

5733 5734 5735 5736 5737
static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

5738
int extent_buffer_under_io(const struct extent_buffer *eb)
5739 5740 5741 5742 5743 5744
{
	return (atomic_read(&eb->io_pages) ||
		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}

5745
static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
5746
{
5747
	struct btrfs_subpage *subpage;
5748

5749
	lockdep_assert_held(&page->mapping->private_lock);
5750

5751 5752 5753 5754
	if (PagePrivate(page)) {
		subpage = (struct btrfs_subpage *)page->private;
		if (atomic_read(&subpage->eb_refs))
			return true;
5755 5756 5757 5758 5759 5760
		/*
		 * Even if there are no eb refs here, we may still have an
		 * end_page_read() call relying on page::private.
		 */
		if (atomic_read(&subpage->readers))
			return true;
5761 5762 5763
	}
	return false;
}
5764

5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777
static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);

	/*
	 * For mapped eb, we're going to change the page private, which should
	 * be done under the private_lock.
	 */
	if (mapped)
		spin_lock(&page->mapping->private_lock);

	if (!PagePrivate(page)) {
5778
		if (mapped)
5779 5780 5781 5782
			spin_unlock(&page->mapping->private_lock);
		return;
	}

5783
	if (fs_info->nodesize >= PAGE_SIZE) {
5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795
		/*
		 * We do this since we'll remove the pages after we've
		 * removed the eb from the radix tree, so we could race
		 * and have this page now attached to the new eb.  So
		 * only clear page_private if it's still connected to
		 * this eb.
		 */
		if (PagePrivate(page) &&
		    page->private == (unsigned long)eb) {
			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
			BUG_ON(PageDirty(page));
			BUG_ON(PageWriteback(page));
5796
			/*
			 * We need to make sure we haven't been attached
			 * to a new eb.
			 */
5800
			detach_page_private(page);
5801
		}
5802 5803
		if (mapped)
			spin_unlock(&page->mapping->private_lock);
5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820
		return;
	}

	/*
	 * For subpage, we can have dummy eb with page private.  In this case,
	 * we can directly detach the private as such page is only attached to
	 * one dummy eb, no sharing.
	 */
	if (!mapped) {
		btrfs_detach_subpage(fs_info, page);
		return;
	}

	btrfs_page_dec_eb_refs(fs_info, page);

	/*
	 * We can only detach the page private if there are no other ebs in the
5821
	 * page range and no unfinished IO.
5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844
	 */
	if (!page_range_has_eb(fs_info, page))
		btrfs_detach_subpage(fs_info, page);

	spin_unlock(&page->mapping->private_lock);
}

/* Release all pages attached to the extent buffer */
static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
{
	int i;
	int num_pages;

	ASSERT(!extent_buffer_under_io(eb));

	num_pages = num_extent_pages(eb);
	for (i = 0; i < num_pages; i++) {
		struct page *page = eb->pages[i];

		if (!page)
			continue;

		detach_extent_buffer_page(eb, page);
5845

5846
		/* One for when we allocated the page */
5847
		put_page(page);
5848
	}
5849 5850 5851 5852 5853 5854 5855
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
5856
	btrfs_release_extent_buffer_pages(eb);
5857
	btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
5858 5859 5860
	__free_extent_buffer(eb);
}

5861 5862
static struct extent_buffer *
__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
5863
		      unsigned long len)
5864 5865 5866
{
	struct extent_buffer *eb = NULL;

5867
	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
5868 5869
	eb->start = start;
	eb->len = len;
5870
	eb->fs_info = fs_info;
5871
	eb->bflags = 0;
5872
	init_rwsem(&eb->lock);
5873

5874 5875
	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
			     &fs_info->allocated_ebs);
5876
	INIT_LIST_HEAD(&eb->release_list);
5877

5878
	spin_lock_init(&eb->refs_lock);
5879
	atomic_set(&eb->refs, 1);
5880
	atomic_set(&eb->io_pages, 0);
5881

5882
	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
5883 5884 5885 5886

	return eb;
}

5887
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
5888
{
5889
	int i;
5890
	struct extent_buffer *new;
5891
	int num_pages = num_extent_pages(src);
5892
	int ret;
5893

5894
	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5895 5896 5897
	if (new == NULL)
		return NULL;

5898 5899 5900 5901 5902 5903 5904
	/*
	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
	 * btrfs_release_extent_buffer() have different behavior for
	 * UNMAPPED subpage extent buffer.
	 */
	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);

5905 5906 5907 5908 5909 5910 5911
	memset(new->pages, 0, sizeof(*new->pages) * num_pages);
	ret = btrfs_alloc_page_array(num_pages, new->pages);
	if (ret) {
		btrfs_release_extent_buffer(new);
		return NULL;
	}

5912
	for (i = 0; i < num_pages; i++) {
5913
		int ret;
5914
		struct page *p = new->pages[i];
5915 5916 5917 5918 5919 5920

		ret = attach_extent_buffer_page(new, p, NULL);
		if (ret < 0) {
			btrfs_release_extent_buffer(new);
			return NULL;
		}
5921
		WARN_ON(PageDirty(p));
5922
		copy_page(page_address(p), page_address(src->pages[i]));
5923
	}
5924
	set_extent_buffer_uptodate(new);
5925 5926 5927 5928

	return new;
}

5929 5930
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len)
5931 5932
{
	struct extent_buffer *eb;
5933 5934
	int num_pages;
	int i;
5935
	int ret;
5936

5937
	eb = __alloc_extent_buffer(fs_info, start, len);
5938 5939 5940
	if (!eb)
		return NULL;

5941
	num_pages = num_extent_pages(eb);
5942 5943 5944 5945
	ret = btrfs_alloc_page_array(num_pages, eb->pages);
	if (ret)
		goto err;

5946
	for (i = 0; i < num_pages; i++) {
5947
		struct page *p = eb->pages[i];
5948

5949
		ret = attach_extent_buffer_page(eb, p, NULL);
5950 5951
		if (ret < 0)
			goto err;
5952
	}
5953

5954 5955
	set_extent_buffer_uptodate(eb);
	btrfs_set_header_nritems(eb, 0);
5956
	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5957 5958 5959

	return eb;
err:
5960 5961 5962 5963 5964
	for (i = 0; i < num_pages; i++) {
		if (eb->pages[i]) {
			detach_extent_buffer_page(eb, eb->pages[i]);
			__free_page(eb->pages[i]);
		}
5965
	}
5966 5967 5968 5969
	__free_extent_buffer(eb);
	return NULL;
}

5970
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5971
						u64 start)
5972
{
5973
	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5974 5975
}

5976 5977
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
5978
	int refs;
5979 5980 5981 5982
	/*
	 * The TREE_REF bit is first set when the extent_buffer is added
	 * to the radix tree. It is also reset, if unset, when a new reference
	 * is created by find_extent_buffer.
5983
	 *
5984 5985
	 * It is only cleared in two cases: freeing the last non-tree
	 * reference to the extent_buffer when its STALE bit is set or
5986
	 * calling release_folio when the tree reference is the only reference.
5987
	 *
5988
	 * In both cases, care is taken to ensure that the extent_buffer's
5989
	 * pages are not under io. However, release_folio can be concurrently
5990 5991 5992
	 * called with creating new references, which is prone to race
	 * conditions between the calls to check_buffer_tree_ref in those
	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
5993
	 *
5994 5995 5996 5997 5998 5999 6000
	 * The actual lifetime of the extent_buffer in the radix tree is
	 * adequately protected by the refcount, but the TREE_REF bit and
	 * its corresponding reference are not. To protect against this
	 * class of races, we call check_buffer_tree_ref from the codepaths
	 * which trigger io after they set eb->io_pages. Note that once io is
	 * initiated, TREE_REF can no longer be cleared, so that is the
	 * moment at which any such race is best fixed.
6001
	 */
6002 6003 6004 6005
	refs = atomic_read(&eb->refs);
	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		return;

6006 6007
	spin_lock(&eb->refs_lock);
	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6008
		atomic_inc(&eb->refs);
6009
	spin_unlock(&eb->refs_lock);
6010 6011
}
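/*
 * Editor's note, an illustrative timeline of the race described above (a
 * sketch, not from the original source):
 *
 *	T1 (read path):     atomic_set(&eb->io_pages, N); check_buffer_tree_ref(eb);
 *	T2 (release_folio): tries to drop the TREE_REF-backed reference
 *
 * Because T1 re-asserts TREE_REF (taking an extra reference if needed) before
 * the IO is submitted, T2 can no longer free the buffer out from under the IO.
 */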

6012 6013
static void mark_extent_buffer_accessed(struct extent_buffer *eb,
		struct page *accessed)
6014
{
6015
	int num_pages, i;
6016

6017 6018
	check_buffer_tree_ref(eb);

6019
	num_pages = num_extent_pages(eb);
6020
	for (i = 0; i < num_pages; i++) {
6021 6022
		struct page *p = eb->pages[i];

6023 6024
		if (p != accessed)
			mark_page_accessed(p);
6025 6026 6027
	}
}

6028 6029
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start)
6030 6031 6032
{
	struct extent_buffer *eb;

6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051
	eb = find_extent_buffer_nolock(fs_info, start);
	if (!eb)
		return NULL;
	/*
	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
	 * another task running free_extent_buffer() might have seen that flag
	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
	 * writeback flags not set) and it's still in the tree (flag
	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
	 * decrementing the extent buffer's reference count twice.  So here we
	 * could race and increment the eb's reference count, clear its stale
	 * flag, mark it as dirty and drop our reference before the other task
	 * finishes executing free_extent_buffer, which would later result in
	 * an attempt to free an extent buffer that is dirty.
	 */
	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
		spin_lock(&eb->refs_lock);
		spin_unlock(&eb->refs_lock);
6052
	}
6053 6054
	mark_extent_buffer_accessed(eb, NULL);
	return eb;
6055 6056
}

6057 6058
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
6059
					u64 start)
6060 6061 6062 6063 6064 6065 6066
{
	struct extent_buffer *eb, *exists = NULL;
	int ret;

	eb = find_extent_buffer(fs_info, start);
	if (eb)
		return eb;
6067
	eb = alloc_dummy_extent_buffer(fs_info, start);
6068
	if (!eb)
6069
		return ERR_PTR(-ENOMEM);
6070
	eb->fs_info = fs_info;
6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084
again:
	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		exists = ERR_PTR(ret);
		goto free_eb;
	}
	spin_lock(&fs_info->buffer_lock);
	ret = radix_tree_insert(&fs_info->buffer_radix,
				start >> fs_info->sectorsize_bits, eb);
	spin_unlock(&fs_info->buffer_lock);
	radix_tree_preload_end();
	if (ret == -EEXIST) {
		exists = find_extent_buffer(fs_info, start);
		if (exists)
6085
			goto free_eb;
6086 6087 6088
		else
			goto again;
	}
6089 6090 6091 6092 6093 6094 6095 6096 6097 6098
	check_buffer_tree_ref(eb);
	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);

	return eb;
free_eb:
	btrfs_release_extent_buffer(eb);
	return exists;
}
#endif

6099 6100
static struct extent_buffer *grab_extent_buffer(
		struct btrfs_fs_info *fs_info, struct page *page)
6101 6102 6103
{
	struct extent_buffer *exists;

6104 6105 6106 6107 6108
	/*
	 * For subpage case, we completely rely on radix tree to ensure we
	 * don't try to insert two ebs for the same bytenr.  So here we always
	 * return NULL and just continue.
	 */
6109
	if (fs_info->nodesize < PAGE_SIZE)
6110 6111
		return NULL;

6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130
	/* Page not yet attached to an extent buffer */
	if (!PagePrivate(page))
		return NULL;

	/*
	 * We could have already allocated an eb for this page and attached one,
	 * so let's see if we can get a ref on the existing eb.  If we can, we
	 * know it's good and we can just return that one; otherwise we know we
	 * can just overwrite page->private.
	 */
	exists = (struct extent_buffer *)page->private;
	if (atomic_inc_not_zero(&exists->refs))
		return exists;

	WARN_ON(PageDirty(page));
	detach_page_private(page);
	return NULL;
}

6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145
static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
{
	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
		btrfs_err(fs_info, "bad tree block start %llu", start);
		return -EINVAL;
	}

	if (fs_info->nodesize < PAGE_SIZE &&
	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
		btrfs_err(fs_info,
		"tree block crosses page boundary, start %llu nodesize %u",
			  start, fs_info->nodesize);
		return -EINVAL;
	}
	if (fs_info->nodesize >= PAGE_SIZE &&
6146
	    !PAGE_ALIGNED(start)) {
6147 6148 6149 6150 6151 6152 6153 6154
		btrfs_err(fs_info,
		"tree block is not page aligned, start %llu nodesize %u",
			  start, fs_info->nodesize);
		return -EINVAL;
	}
	return 0;
}

6155
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
6156
					  u64 start, u64 owner_root, int level)
6157
{
6158
	unsigned long len = fs_info->nodesize;
6159 6160
	int num_pages;
	int i;
6161
	unsigned long index = start >> PAGE_SHIFT;
6162
	struct extent_buffer *eb;
6163
	struct extent_buffer *exists = NULL;
6164
	struct page *p;
6165
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
6166
	int uptodate = 1;
6167
	int ret;
6168

6169
	if (check_eb_alignment(fs_info, start))
6170 6171
		return ERR_PTR(-EINVAL);

6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182
#if BITS_PER_LONG == 32
	if (start >= MAX_LFS_FILESIZE) {
		btrfs_err_rl(fs_info,
		"extent buffer %llu is beyond 32bit page cache limit", start);
		btrfs_err_32bit_limit(fs_info);
		return ERR_PTR(-EOVERFLOW);
	}
	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
		btrfs_warn_32bit_limit(fs_info);
#endif

6183
	eb = find_extent_buffer(fs_info, start);
6184
	if (eb)
6185 6186
		return eb;

6187
	eb = __alloc_extent_buffer(fs_info, start, len);
6188
	if (!eb)
6189
		return ERR_PTR(-ENOMEM);
6190
	btrfs_set_buffer_lockdep_class(owner_root, eb, level);
6191

6192
	num_pages = num_extent_pages(eb);
6193
	for (i = 0; i < num_pages; i++, index++) {
6194 6195
		struct btrfs_subpage *prealloc = NULL;

6196
		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
6197 6198
		if (!p) {
			exists = ERR_PTR(-ENOMEM);
6199
			goto free_eb;
6200
		}

6202 6203 6204 6205 6206 6207 6208 6209 6210 6211
		/*
		 * Preallocate page->private for the subpage case, so that we won't
		 * allocate memory while holding private_lock.  The memory will be
		 * freed by attach_extent_buffer_page() or freed manually if
		 * we exit earlier.
		 *
		 * Although we have ensured that one subpage eb can only have one
		 * page, this may change in the future for 16K page size
		 * support, so we still preallocate the memory in the loop.
		 */
6212
		if (fs_info->nodesize < PAGE_SIZE) {
6213 6214 6215
			prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
			if (IS_ERR(prealloc)) {
				ret = PTR_ERR(prealloc);
6216 6217 6218 6219 6220
				unlock_page(p);
				put_page(p);
				exists = ERR_PTR(ret);
				goto free_eb;
			}
6221 6222
		}

		spin_lock(&mapping->private_lock);
6224
		exists = grab_extent_buffer(fs_info, p);
6225 6226 6227 6228 6229
		if (exists) {
			spin_unlock(&mapping->private_lock);
			unlock_page(p);
			put_page(p);
			mark_extent_buffer_accessed(exists, p);
6230
			btrfs_free_subpage(prealloc);
6231
			goto free_eb;
6232
		}
6233 6234 6235
		/* Should not fail, as we have preallocated the memory */
		ret = attach_extent_buffer_page(eb, p, prealloc);
		ASSERT(!ret);
6236 6237 6238 6239 6240 6241 6242 6243 6244 6245
		/*
		 * To inform that we have an extra eb under allocation, so that
		 * detach_extent_buffer_page() won't release the page private
		 * when the eb hasn't yet been inserted into the radix tree.
		 *
		 * The ref will be decreased when the eb releases the page, in
		 * detach_extent_buffer_page().
		 * Thus it needs no special handling in the error path.
		 */
		btrfs_page_inc_eb_refs(fs_info, p);
		spin_unlock(&mapping->private_lock);
6247

6248
		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
6249
		eb->pages[i] = p;
6250 6251
		if (!PageUptodate(p))
			uptodate = 0;

		/*
6254 6255
		 * We can't unlock the pages just yet since the extent buffer
		 * hasn't been properly inserted in the radix tree, this
6256
		 * opens a race with btree_release_folio which can free a page
6257 6258
		 * while we are still filling in all pages for the buffer and
		 * we could crash.
		 */
6260 6261
	}
	if (uptodate)
6262
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277
again:
	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		exists = ERR_PTR(ret);
		goto free_eb;
	}

	spin_lock(&fs_info->buffer_lock);
	ret = radix_tree_insert(&fs_info->buffer_radix,
				start >> fs_info->sectorsize_bits, eb);
	spin_unlock(&fs_info->buffer_lock);
	radix_tree_preload_end();
	if (ret == -EEXIST) {
		exists = find_extent_buffer(fs_info, start);
		if (exists)
6278
			goto free_eb;
6279 6280 6281
		else
			goto again;
	}
6282
	/* add one reference for the tree */
6283
	check_buffer_tree_ref(eb);
6284
	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);

	/*
6287
	 * Now it's safe to unlock the pages because any calls to
6288
	 * btree_release_folio will correctly detect that a page belongs to a
6289
	 * live buffer and won't free them prematurely.
	 */
6291 6292
	for (i = 0; i < num_pages; i++)
		unlock_page(eb->pages[i]);
6293 6294
	return eb;

6295
free_eb:
6296
	WARN_ON(!atomic_dec_and_test(&eb->refs));
6297 6298 6299 6300
	for (i = 0; i < num_pages; i++) {
		if (eb->pages[i])
			unlock_page(eb->pages[i]);
	}

6302
	btrfs_release_extent_buffer(eb);
6303
	return exists;
6304 6305
}

6306 6307 6308 6309 6310 6311 6312 6313
static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	__free_extent_buffer(eb);
}

6314
static int release_extent_buffer(struct extent_buffer *eb)
6315
	__releases(&eb->refs_lock)
6316
{
6317 6318
	lockdep_assert_held(&eb->refs_lock);

6319 6320
	WARN_ON(atomic_read(&eb->refs) == 0);
	if (atomic_dec_and_test(&eb->refs)) {
6321
		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
6322
			struct btrfs_fs_info *fs_info = eb->fs_info;
6323

6324
			spin_unlock(&eb->refs_lock);
6325

6326 6327 6328 6329
			spin_lock(&fs_info->buffer_lock);
			radix_tree_delete(&fs_info->buffer_radix,
					  eb->start >> fs_info->sectorsize_bits);
			spin_unlock(&fs_info->buffer_lock);
6330 6331
		} else {
			spin_unlock(&eb->refs_lock);
6332
		}
6333

6334
		btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
6335
		/* Should be safe to release our pages at this point */
6336
		btrfs_release_extent_buffer_pages(eb);
6337
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6338
		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
6339 6340 6341 6342
			__free_extent_buffer(eb);
			return 1;
		}
#endif
6343
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
6344
		return 1;
6345 6346
	}
	spin_unlock(&eb->refs_lock);
6347 6348

	return 0;
6349 6350
}

6351 6352
void free_extent_buffer(struct extent_buffer *eb)
{
6353 6354
	int refs;
	int old;
6355 6356 6357
	if (!eb)
		return;

6358 6359
	while (1) {
		refs = atomic_read(&eb->refs);
6360 6361 6362
		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
			refs == 1))
6363 6364 6365 6366 6367 6368
			break;
		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
		if (old == refs)
			return;
	}

6369 6370 6371
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
6372
	    !extent_buffer_under_io(eb) &&
6373 6374 6375 6376 6377 6378 6379
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);

	/*
	 * I know this is terrible, but it's temporary until we stop tracking
	 * the uptodate bits and such for the extent buffers.
	 */
6380
	release_extent_buffer(eb);
6381 6382 6383 6384 6385
}

void free_extent_buffer_stale(struct extent_buffer *eb)
{
	if (!eb)
6386 6387
		return;

6388 6389 6390
	spin_lock(&eb->refs_lock);
	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);

6391
	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
6392 6393
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);
6394
	release_extent_buffer(eb);
6395 6396
}

6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424
static void btree_clear_page_dirty(struct page *page)
{
	ASSERT(PageDirty(page));
	ASSERT(PageLocked(page));
	clear_page_dirty_for_io(page);
	xa_lock_irq(&page->mapping->i_pages);
	if (!PageDirty(page))
		__xa_clear_mark(&page->mapping->i_pages,
				page_index(page), PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(&page->mapping->i_pages);
}

static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct page *page = eb->pages[0];
	bool last;

	/* btree_clear_page_dirty() needs page locked */
	lock_page(page);
	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
						  eb->len);
	if (last)
		btree_clear_page_dirty(page);
	unlock_page(page);
	WARN_ON(atomic_read(&eb->refs) == 0);
}

6425
void clear_extent_buffer_dirty(const struct extent_buffer *eb)
6426
{
6427 6428
	int i;
	int num_pages;
6429 6430
	struct page *page;

6431
	if (eb->fs_info->nodesize < PAGE_SIZE)
6432 6433
		return clear_subpage_extent_buffer_dirty(eb);

6434
	num_pages = num_extent_pages(eb);
6435 6436

	for (i = 0; i < num_pages; i++) {
6437
		page = eb->pages[i];
6438
		if (!PageDirty(page))
			continue;
6440
		lock_page(page);
6441
		btree_clear_page_dirty(page);
6442
		ClearPageError(page);
6443
		unlock_page(page);
6444
	}
6445
	WARN_ON(atomic_read(&eb->refs) == 0);
6446 6447
}

6448
bool set_extent_buffer_dirty(struct extent_buffer *eb)
6449
{
6450 6451
	int i;
	int num_pages;
6452
	bool was_dirty;
6453

6454 6455
	check_buffer_tree_ref(eb);

6456
	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
6457

6458
	num_pages = num_extent_pages(eb);
6459
	WARN_ON(atomic_read(&eb->refs) == 0);
6460 6461
	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));

6462
	if (!was_dirty) {
6463
		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
6464

6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483
		/*
		 * For the subpage case, we can have other extent buffers in the
		 * same page, and in clear_subpage_extent_buffer_dirty() we
		 * have to clear the page dirty bit without the subpage lock held.
		 * This can cause a race where our page's dirty bit gets cleared
		 * right after we set it.
		 *
		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
		 * its page for other reasons, so we can use the page lock to
		 * prevent the above race.
		 */
		if (subpage)
			lock_page(eb->pages[0]);
		for (i = 0; i < num_pages; i++)
			btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
					     eb->start, eb->len);
		if (subpage)
			unlock_page(eb->pages[0]);
	}
6484 6485 6486 6487 6488
#ifdef CONFIG_BTRFS_DEBUG
	for (i = 0; i < num_pages; i++)
		ASSERT(PageDirty(eb->pages[i]));
#endif

6489
	return was_dirty;
6490 6491
}

6492
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
6493
{
6494
	struct btrfs_fs_info *fs_info = eb->fs_info;
6495
	struct page *page;
6496
	int num_pages;
6497
	int i;
6498

6499
	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6500
	num_pages = num_extent_pages(eb);
6501
	for (i = 0; i < num_pages; i++) {
6502
		page = eb->pages[i];
6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514
		if (!page)
			continue;

		/*
		 * This is special handling for metadata subpage, as regular
		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
		 */
		if (fs_info->nodesize >= PAGE_SIZE)
			ClearPageUptodate(page);
		else
			btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
						     eb->len);
6515 6516 6517
	}
}

6518
void set_extent_buffer_uptodate(struct extent_buffer *eb)
6519
{
6520
	struct btrfs_fs_info *fs_info = eb->fs_info;
6521
	struct page *page;
6522
	int num_pages;
6523
	int i;
6524

6525
	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6526
	num_pages = num_extent_pages(eb);
6527
	for (i = 0; i < num_pages; i++) {
6528
		page = eb->pages[i];
6529 6530 6531 6532 6533 6534 6535 6536 6537 6538

		/*
		 * This is special handling for metadata subpage, as regular
		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
		 */
		if (fs_info->nodesize >= PAGE_SIZE)
			SetPageUptodate(page);
		else
			btrfs_subpage_set_uptodate(fs_info, page, eb->start,
						   eb->len);
6539 6540 6541
	}
}

6542 6543 6544 6545 6546 6547
static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
				      int mirror_num)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct extent_io_tree *io_tree;
	struct page *page = eb->pages[0];
6548
	struct btrfs_bio_ctrl bio_ctrl = { 0 };
6549 6550 6551 6552 6553 6554 6555
	int ret = 0;

	ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
	ASSERT(PagePrivate(page));
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;

	if (wait == WAIT_NONE) {
6556 6557
		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
			return -EAGAIN;
6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578
	} else {
		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
		if (ret < 0)
			return ret;
	}

	ret = 0;
	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
	    PageUptodate(page) ||
	    btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
		return ret;
	}

	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = 0;
	atomic_set(&eb->io_pages, 1);
	check_buffer_tree_ref(eb);
	btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);

6579
	btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
6580
	ret = submit_extent_page(REQ_OP_READ, NULL, &bio_ctrl,
6581 6582 6583
				 page, eb->start, eb->len,
				 eb->start - page_offset(page),
				 end_bio_extent_readpage, mirror_num, 0,
6584 6585 6586 6587 6588 6589 6590 6591 6592
				 true);
	if (ret) {
		/*
		 * In the endio function, if we hit something wrong we will
		 * increase the io_pages, so here we need to decrease it for
		 * error path.
		 */
		atomic_dec(&eb->io_pages);
	}
6593
	if (bio_ctrl.bio) {
6594
		submit_one_bio(bio_ctrl.bio, mirror_num, 0);
6595
		bio_ctrl.bio = NULL;
6596 6597 6598 6599 6600 6601 6602 6603 6604 6605
	}
	if (ret || wait != WAIT_COMPLETE)
		return ret;

	wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		ret = -EIO;
	return ret;
}

6606
int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
6607
{
6608
	int i;
6609 6610 6611
	struct page *page;
	int err;
	int ret = 0;
6612 6613
	int locked_pages = 0;
	int all_uptodate = 1;
6614
	int num_pages;
6615
	unsigned long num_reads = 0;
6616
	struct btrfs_bio_ctrl bio_ctrl = { 0 };
6617

6618
	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6619 6620
		return 0;

6621 6622 6623 6624 6625 6626 6627 6628
	/*
	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
	 * operation, which could potentially still be in flight.  In this case
	 * we simply want to return an error.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
		return -EIO;

6629
	if (eb->fs_info->nodesize < PAGE_SIZE)
6630 6631
		return read_extent_buffer_subpage(eb, wait, mirror_num);

6632
	num_pages = num_extent_pages(eb);
6633
	for (i = 0; i < num_pages; i++) {
6634
		page = eb->pages[i];
6635
		if (wait == WAIT_NONE) {
6636 6637 6638 6639 6640 6641 6642
			/*
			 * WAIT_NONE is only utilized by readahead. If we can't
			 * acquire the lock atomically it means either the eb
			 * is being read out or under modification.
			 * Either way the eb will be or has been cached,
			 * readahead can exit safely.
			 */
6643
			if (!trylock_page(page))
6644
				goto unlock_exit;
6645 6646 6647
		} else {
			lock_page(page);
		}
6648
		locked_pages++;
6649 6650 6651 6652 6653 6654
	}
	/*
	 * We need to lock all pages first to make sure that
	 * the uptodate bit of our pages won't be affected by
	 * clear_extent_buffer_uptodate().
	 */
6655
	for (i = 0; i < num_pages; i++) {
6656
		page = eb->pages[i];
6657 6658
		if (!PageUptodate(page)) {
			num_reads++;
6659
			all_uptodate = 0;
6660
		}
6661
	}
6662

6663
	if (all_uptodate) {
6664
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6665 6666 6667
		goto unlock_exit;
	}

6668
	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6669
	eb->read_mirror = 0;
6670
	atomic_set(&eb->io_pages, num_reads);
6671
	/*
6672
	 * It is possible for release_folio to clear the TREE_REF bit before we
6673 6674 6675
	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
	 */
	check_buffer_tree_ref(eb);
6676
	for (i = 0; i < num_pages; i++) {
6677
		page = eb->pages[i];
6678

6679
		if (!PageUptodate(page)) {
6680 6681 6682 6683 6684 6685
			if (ret) {
				atomic_dec(&eb->io_pages);
				unlock_page(page);
				continue;
			}

6686
			ClearPageError(page);
6687
			err = submit_extent_page(REQ_OP_READ, NULL,
6688 6689 6690
					 &bio_ctrl, page, page_offset(page),
					 PAGE_SIZE, 0, end_bio_extent_readpage,
					 mirror_num, 0, false);
6691 6692
			if (err) {
				/*
				 * We failed to submit the bio, so it's the
				 * caller's responsibility to perform cleanup,
				 * i.e. unlock the page/set the error bit.
				 */
6697 6698 6699
				ret = err;
				SetPageError(page);
				unlock_page(page);
6700 6701
				atomic_dec(&eb->io_pages);
			}
6702 6703 6704 6705 6706
		} else {
			unlock_page(page);
		}
	}

6707
	if (bio_ctrl.bio) {
6708
		submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.compress_type);
6709
		bio_ctrl.bio = NULL;
6710
	}
6711

6712
	if (ret || wait != WAIT_COMPLETE)
6713
		return ret;

6715
	for (i = 0; i < num_pages; i++) {
6716
		page = eb->pages[i];
6717
		wait_on_page_locked(page);
		if (!PageUptodate(page))
6719 6720
			ret = -EIO;
	}

6722
	return ret;
6723 6724

unlock_exit:
	while (locked_pages > 0) {
6726
		locked_pages--;
6727 6728
		page = eb->pages[locked_pages];
		unlock_page(page);
6729 6730
	}
	return ret;
6731 6732
}

6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762
static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
			    unsigned long len)
{
	btrfs_warn(eb->fs_info,
		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
		eb->start, eb->len, start, len);
	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));

	return true;
}

/*
 * Check if the [start, start + len) range is valid before reading/writing
 * the eb.
 * NOTE: @start and @len are offset inside the eb, not logical address.
 *
 * Caller should not touch the dst/src memory if this function returns error.
 */
static inline int check_eb_range(const struct extent_buffer *eb,
				 unsigned long start, unsigned long len)
{
	unsigned long offset;

	/* start, start + len should not go beyond eb->len nor overflow */
	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
		return report_eb_range(eb, start, len);

	return false;
}
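/*
 * Editor's note, an illustrative use of the helper above (not part of the
 * original source): callers treat a non-zero return as "out of range", e.g.
 *
 *	if (check_eb_range(eb, start, len))
 *		return;
 *
 * A request with start == eb->len and len == 1, or any start/len pair whose
 * sum overflows an unsigned long, trips the check and triggers the warning in
 * report_eb_range().
 */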

6763 6764
void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
			unsigned long start, unsigned long len)
6765 6766 6767 6768 6769 6770
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
6771
	unsigned long i = get_eb_page_index(start);
6772

6773
	if (check_eb_range(eb, start, len))
6774
		return;
6775

6776
	offset = get_eb_offset_in_page(eb, start);
6777

	while (len > 0) {
6779
		page = eb->pages[i];
6780

6781
		cur = min(len, (PAGE_SIZE - offset));
6782
		kaddr = page_address(page);
6783 6784 6785 6786 6787 6788 6789 6790 6791
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
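/*
 * Editor's note, a hedged usage sketch (not part of the original source):
 * copying a fixed-size header field out of a tree block looks like
 *
 *	__le64 gen;
 *
 *	read_extent_buffer(eb, &gen,
 *			   offsetof(struct btrfs_header, generation),
 *			   sizeof(gen));
 *
 * gen then holds the raw on-disk (little-endian) value; the helper walks
 * eb->pages[] itself, so the caller does not care whether the field straddles
 * a page boundary.
 */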

6792 6793 6794
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
				       void __user *dstv,
				       unsigned long start, unsigned long len)
6795 6796 6797 6798 6799 6800
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char __user *dst = (char __user *)dstv;
6801
	unsigned long i = get_eb_page_index(start);
6802 6803 6804 6805 6806
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

6807
	offset = get_eb_offset_in_page(eb, start);
6808 6809

	while (len > 0) {
6810
		page = eb->pages[i];
6811

6812
		cur = min(len, (PAGE_SIZE - offset));
6813
		kaddr = page_address(page);
6814
		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827
			ret = -EFAULT;
			break;
		}

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}

	return ret;
}

6828 6829
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
6830 6831 6832 6833 6834 6835
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
6836
	unsigned long i = get_eb_page_index(start);
6837 6838
	int ret = 0;

6839 6840
	if (check_eb_range(eb, start, len))
		return -EINVAL;
6841

6842
	offset = get_eb_offset_in_page(eb, start);
6843

	while (len > 0) {
6845
		page = eb->pages[i];
6846

6847
		cur = min(len, (PAGE_SIZE - offset));
6848

6849
		kaddr = page_address(page);
6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872
/*
 * Check that the extent buffer is uptodate.
 *
 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
 */
static void assert_eb_page_uptodate(const struct extent_buffer *eb,
				    struct page *page)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

6873 6874 6875 6876 6877 6878 6879 6880 6881
	/*
	 * If we are using the commit root we could potentially clear a page
	 * Uptodate while we're using the extent buffer that we've previously
	 * looked up.  We don't want to complain in this case, as the page was
	 * valid before, we just didn't write it out.  Instead we want to catch
	 * the case where we didn't actually read the block properly, which
	 * would have !PageUptodate && !PageError, as we clear PageError before
	 * reading.
	 */
6882
	if (fs_info->nodesize < PAGE_SIZE) {
6883
		bool uptodate, error;
6884 6885 6886

		uptodate = btrfs_subpage_test_uptodate(fs_info, page,
						       eb->start, eb->len);
6887 6888
		error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len);
		WARN_ON(!uptodate && !error);
6889
	} else {
6890
		WARN_ON(!PageUptodate(page) && !PageError(page));
6891 6892 6893
	}
}

6894
void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
6895 6896 6897 6898
		const void *srcv)
{
	char *kaddr;

6899
	assert_eb_page_uptodate(eb, eb->pages[0]);
6900 6901 6902 6903
	kaddr = page_address(eb->pages[0]) +
		get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
						   chunk_tree_uuid));
	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
6904 6905
}

6906
void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
6907 6908 6909
{
	char *kaddr;

6910
	assert_eb_page_uptodate(eb, eb->pages[0]);
6911 6912 6913
	kaddr = page_address(eb->pages[0]) +
		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
6914 6915
}

6916
void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
6917 6918 6919 6920 6921 6922 6923
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
6924
	unsigned long i = get_eb_page_index(start);
6925

6926 6927
	WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));

6928 6929
	if (check_eb_range(eb, start, len))
		return;
6930

6931
	offset = get_eb_offset_in_page(eb, start);
6932

	while (len > 0) {
6934
		page = eb->pages[i];
6935
		assert_eb_page_uptodate(eb, page);
6936

6937
		cur = min(len, PAGE_SIZE - offset);
6938
		kaddr = page_address(page);
6939 6940 6941 6942 6943 6944 6945 6946 6947
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

6948
void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
6949
		unsigned long len)
6950 6951 6952 6953 6954
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
6955
	unsigned long i = get_eb_page_index(start);
6956

6957 6958
	if (check_eb_range(eb, start, len))
		return;
6959

6960
	offset = get_eb_offset_in_page(eb, start);
6961

	while (len > 0) {
6963
		page = eb->pages[i];
6964
		assert_eb_page_uptodate(eb, page);
6965

6966
		cur = min(len, PAGE_SIZE - offset);
6967
		kaddr = page_address(page);
6968
		memset(kaddr + offset, 0, cur);
6969 6970 6971 6972 6973 6974 6975

		len -= cur;
		offset = 0;
		i++;
	}
}

6976 6977
void copy_extent_buffer_full(const struct extent_buffer *dst,
			     const struct extent_buffer *src)
6978 6979
{
	int i;
6980
	int num_pages;
6981 6982 6983

	ASSERT(dst->len == src->len);

6984
	if (dst->fs_info->nodesize >= PAGE_SIZE) {
6985 6986 6987 6988 6989 6990 6991 6992
		num_pages = num_extent_pages(dst);
		for (i = 0; i < num_pages; i++)
			copy_page(page_address(dst->pages[i]),
				  page_address(src->pages[i]));
	} else {
		size_t src_offset = get_eb_offset_in_page(src, 0);
		size_t dst_offset = get_eb_offset_in_page(dst, 0);

6993
		ASSERT(src->fs_info->nodesize < PAGE_SIZE);
6994 6995 6996 6997
		memcpy(page_address(dst->pages[0]) + dst_offset,
		       page_address(src->pages[0]) + src_offset,
		       src->len);
	}
6998 6999
}

void copy_extent_buffer(const struct extent_buffer *dst,
			const struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	unsigned long i = get_eb_page_index(dst_offset);

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(src, src_offset, len))
		return;

	WARN_ON(src->len != dst_len);

	offset = get_eb_offset_in_page(dst, dst_offset);

	while (len > 0) {
		page = dst->pages[i];
		assert_eb_page_uptodate(dst, page);

		cur = min(len, (unsigned long)(PAGE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

/*
 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
 * given bit number
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number
 * @page_index: return index of the page in the extent buffer that contains the
 * given bit number
 * @page_offset: return offset into the page given by page_index
 *
 * This helper hides the ugliness of finding the byte in an extent buffer which
 * contains a given bit.
 */
static inline void eb_bitmap_offset(const struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *page_index,
				    size_t *page_offset)
{
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in the
	 * bitmap item.
	 */
	offset = start + offset_in_page(eb->start) + byte_offset;

	*page_index = offset >> PAGE_SHIFT;
	*page_offset = offset_in_page(offset);
}
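
/*
 * Worked example with illustrative numbers only: on a system with 4K pages,
 * for an eb whose start is page aligned (offset_in_page(eb->start) == 0), a
 * bitmap item at byte 4000 of the eb and nr == 800, BIT_BYTE(800) == 100, so
 * offset == 4100, giving *page_index == 1 and *page_offset == 4.
 */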

/**
 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number to test
 */
int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	page = eb->pages[i];
	assert_eb_page_uptodate(eb, page);
	kaddr = page_address(page);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}

/**
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	assert_eb_page_uptodate(eb, page);
	kaddr = page_address(page);

	while (len >= bits_to_set) {
		kaddr[offset] |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			assert_eb_page_uptodate(eb, page);
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] |= mask_to_set;
	}
}


/**
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
				unsigned long start, unsigned long pos,
				unsigned long len)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	assert_eb_page_uptodate(eb, page);
	kaddr = page_address(page);

	while (len >= bits_to_clear) {
		kaddr[offset] &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			assert_eb_page_uptodate(eb, page);
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] &= ~mask_to_clear;
	}
}
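
/*
 * Minimal usage sketch (hypothetical helper, not called from this file): set
 * a run of bits in a bitmap item at byte offset @bitmap_start of @eb, check
 * the first bit, then clear the run again.  Assumes the eb pages are
 * uptodate, as asserted by the bitmap helpers above.
 */
static inline void example_toggle_bitmap_range(const struct extent_buffer *eb,
					       unsigned long bitmap_start,
					       unsigned long pos,
					       unsigned long nr_bits)
{
	extent_buffer_bitmap_set(eb, bitmap_start, pos, nr_bits);
	WARN_ON(!extent_buffer_test_bit(eb, bitmap_start, pos));
	extent_buffer_bitmap_clear(eb, bitmap_start, pos, nr_bits);
	WARN_ON(extent_buffer_test_bit(eb, bitmap_start, pos));
}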

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}
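
/*
 * Illustrative numbers only: with src == 100, dst == 150 and len == 64 the
 * distance between the two ranges is 50, which is less than len, so they
 * overlap and copy_pages() below must use memmove() instead of memcpy().
 */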

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;
	int must_memmove = 0;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		if (areas_overlap(src_off, dst_off, len))
			must_memmove = 1;
	}

	if (must_memmove)
		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
	else
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

void memcpy_extent_buffer(const struct extent_buffer *dst,
			  unsigned long dst_offset, unsigned long src_offset,
			  unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_i;
	unsigned long src_i;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;

	while (len > 0) {
		dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
		src_off_in_page = get_eb_offset_in_page(dst, src_offset);

		dst_i = get_eb_page_index(dst_offset);
		src_i = get_eb_page_index(src_offset);

		cur = min(len, (unsigned long)(PAGE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			(unsigned long)(PAGE_SIZE - dst_off_in_page));

		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

void memmove_extent_buffer(const struct extent_buffer *dst,
			   unsigned long dst_offset, unsigned long src_offset,
			   unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	unsigned long dst_i;
	unsigned long src_i;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = get_eb_page_index(dst_end);
		src_i = get_eb_page_index(src_end);

		dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
		src_off_in_page = get_eb_offset_in_page(dst, src_end);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
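
/*
 * Illustrative numbers only: moving 100 bytes from offset 100 to offset 120
 * within the same eb overlaps by 80 bytes, and dst > src, so the loop above
 * walks from the last byte backwards (via dst_end/src_end) and never reads a
 * source byte that has already been overwritten.
 */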

#define GANG_LOOKUP_SIZE	16
static struct extent_buffer *get_next_extent_buffer(
		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
{
	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
	struct extent_buffer *found = NULL;
	u64 page_start = page_offset(page);
	u64 cur = page_start;

	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
	lockdep_assert_held(&fs_info->buffer_lock);

	while (cur < page_start + PAGE_SIZE) {
		int ret;
		int i;

		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
				(void **)gang, cur >> fs_info->sectorsize_bits,
				min_t(unsigned int, GANG_LOOKUP_SIZE,
				      PAGE_SIZE / fs_info->nodesize));
		if (ret == 0)
			goto out;
		for (i = 0; i < ret; i++) {
			/* Already beyond page end */
			if (gang[i]->start >= page_start + PAGE_SIZE)
				goto out;
			/* Found one */
			if (gang[i]->start >= bytenr) {
				found = gang[i];
				goto out;
			}
		}
		cur = gang[ret - 1]->start + gang[ret - 1]->len;
	}
out:
	return found;
}

static int try_release_subpage_extent_buffer(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	u64 cur = page_offset(page);
	const u64 end = page_offset(page) + PAGE_SIZE;
	int ret;

	while (cur < end) {
		struct extent_buffer *eb = NULL;

		/*
		 * Unlike try_release_extent_buffer() which uses page->private
		 * to grab buffer, for subpage case we rely on radix tree, thus
		 * we need to ensure radix tree consistency.
		 *
		 * We also want an atomic snapshot of the radix tree, thus go
		 * with spinlock rather than RCU.
		 */
		spin_lock(&fs_info->buffer_lock);
		eb = get_next_extent_buffer(fs_info, page, cur);
		if (!eb) {
			/* No more eb in the page range after or at cur */
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		cur = eb->start + eb->len;

		/*
		 * The same as try_release_extent_buffer(), to ensure the eb
		 * won't disappear out from under us.
		 */
		spin_lock(&eb->refs_lock);
		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
			spin_unlock(&eb->refs_lock);
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		spin_unlock(&fs_info->buffer_lock);

		/*
		 * If tree ref isn't set then we know the ref on this eb is a
		 * real ref, so just return, this eb will likely be freed soon
		 * anyway.
		 */
		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
			spin_unlock(&eb->refs_lock);
			break;
		}

		/*
		 * Here we don't care about the return value, we will always
		 * check the page private at the end.  And
		 * release_extent_buffer() will release the refs_lock.
		 */
		release_extent_buffer(eb);
	}
	/*
	 * Finally, check whether page private has been cleared: if we have
	 * released all ebs in the page, the page private should be cleared by now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page))
		ret = 1;
	else
		ret = 0;
	spin_unlock(&page->mapping->private_lock);
	return ret;
}

int try_release_extent_buffer(struct page *page)
{
	struct extent_buffer *eb;

	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
		return try_release_subpage_extent_buffer(page);

	/*
	 * We need to make sure nobody is changing page->private, as we rely on
	 * page->private as the pointer to extent buffer.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}

/*
 * btrfs_readahead_tree_block - attempt to readahead a child block
 * @fs_info:	the fs_info
 * @bytenr:	bytenr to read
 * @owner_root: objectid of the root that owns this eb
 * @gen:	generation for the uptodate check, can be 0
 * @level:	level for the eb
 *
 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
 * normal uptodate check of the eb, without checking the generation.  If we have
 * to read the block we will not block on anything.
 */
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 owner_root, u64 gen, int level)
{
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
	if (IS_ERR(eb))
		return;

	if (btrfs_buffer_uptodate(eb, gen, 1)) {
		free_extent_buffer(eb);
		return;
	}

	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
	if (ret < 0)
		free_extent_buffer_stale(eb);
	else
		free_extent_buffer(eb);
}

/*
 * btrfs_readahead_node_child - readahead a node's child block
 * @node:	parent node we're reading from
 * @slot:	slot in the parent node for the child we want to read
 *
 * A helper for btrfs_readahead_tree_block(): read ahead the block whose bytenr
 * is stored in the given slot of the node provided.
 */
void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
{
	btrfs_readahead_tree_block(node->fs_info,
				   btrfs_node_blockptr(node, slot),
				   btrfs_header_owner(node),
				   btrfs_node_ptr_generation(node, slot),
				   btrfs_header_level(node) - 1);
}
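
/*
 * Minimal usage sketch (hypothetical helper, not called from this file): kick
 * off readahead for every child of an internal node before descending into it.
 */
static inline void example_readahead_all_children(struct extent_buffer *node)
{
	int nritems = btrfs_header_nritems(node);
	int slot;

	for (slot = 0; slot < nritems; slot++)
		btrfs_readahead_node_child(node, slot);
}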