ordered-data.c 25.8 KB
Newer Older
C
Chris Mason 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
20
#include <linux/blkdev.h>
21 22
#include <linux/writeback.h>
#include <linux/pagevec.h>
C
Chris Mason 已提交
23 24 25
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
26
#include "extent_io.h"
C
Chris Mason 已提交
27

28
static u64 entry_end(struct btrfs_ordered_extent *entry)
C
Chris Mason 已提交
29
{
30 31 32
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
C
Chris Mason 已提交
33 34
}

C
Chris Mason 已提交
35 36 37
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
38 39
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
C
Chris Mason 已提交
40
{
C
Chris Mason 已提交
41 42
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
43
	struct btrfs_ordered_extent *entry;
C
Chris Mason 已提交
44

C
Chris Mason 已提交
45
	while (*p) {
C
Chris Mason 已提交
46
		parent = *p;
47
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
C
Chris Mason 已提交
48

49
		if (file_offset < entry->file_offset)
C
Chris Mason 已提交
50
			p = &(*p)->rb_left;
51
		else if (file_offset >= entry_end(entry))
C
Chris Mason 已提交
52 53 54 55 56 57 58 59 60 61
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

62 63 64 65 66 67 68 69
/*
 * Abort the filesystem: tree_insert() found an ordered extent already
 * covering the offset we tried to insert at, which means the per-inode
 * ordered tree is corrupt.
 */
static void ordered_data_tree_panic(struct inode *inode, int errno,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}

C
Chris Mason 已提交
70 71 72 73
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
74 75
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
C
Chris Mason 已提交
76
{
C
Chris Mason 已提交
77
	struct rb_node *n = root->rb_node;
C
Chris Mason 已提交
78
	struct rb_node *prev = NULL;
79 80 81
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;
C
Chris Mason 已提交
82

C
Chris Mason 已提交
83
	while (n) {
84
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
C
Chris Mason 已提交
85 86 87
		prev = n;
		prev_entry = entry;

88
		if (file_offset < entry->file_offset)
C
Chris Mason 已提交
89
			n = n->rb_left;
90
		else if (file_offset >= entry_end(entry))
C
Chris Mason 已提交
91 92 93 94 95 96 97
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

C
Chris Mason 已提交
98
	while (prev && file_offset >= entry_end(prev_entry)) {
99 100 101 102 103 104 105 106 107 108 109 110 111
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
C
Chris Mason 已提交
112
	while (prev && file_offset < entry_end(prev_entry)) {
113 114 115 116 117 118
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
C
Chris Mason 已提交
119 120 121 122 123
	}
	*prev_ret = prev;
	return NULL;
}

C
Chris Mason 已提交
124 125 126
/*
 * helper to check if a given offset is inside a given entry
 */
127 128 129 130 131 132 133 134
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

135 136 137 138 139 140 141 142 143
/*
 * Return 1 when the range [file_offset, file_offset + len) intersects
 * @entry's file range, 0 otherwise.
 */
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	return file_offset + len > entry->file_offset &&
	       entry->file_offset + entry->len > file_offset;
}

C
Chris Mason 已提交
144 145 146 147
/*
 * look find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
148 149
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
C
Chris Mason 已提交
150
{
151
	struct rb_root *root = &tree->tree;
152
	struct rb_node *prev = NULL;
C
Chris Mason 已提交
153
	struct rb_node *ret;
154 155 156 157 158 159 160 161 162
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
C
Chris Mason 已提交
163
	if (!ret)
164 165 166
		ret = prev;
	if (ret)
		tree->last = ret;
C
Chris Mason 已提交
167 168 169
	return ret;
}

170 171 172 173 174 175 176 177 178 179 180
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
181 182
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
183
				      int type, int dio, int compress_type)
C
Chris Mason 已提交
184 185
{
	struct btrfs_ordered_inode_tree *tree;
186 187
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
C
Chris Mason 已提交
188

189 190
	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
C
Chris Mason 已提交
191 192 193
	if (!entry)
		return -ENOMEM;

194 195 196
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
C
Chris Mason 已提交
197
	entry->disk_len = disk_len;
198
	entry->bytes_left = len;
199
	entry->inode = igrab(inode);
200
	entry->compress_type = compress_type;
Y
Yan Zheng 已提交
201
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
Y
Yan Zheng 已提交
202
		set_bit(type, &entry->flags);
203

204 205 206
	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

207 208 209 210
	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
211
	INIT_LIST_HEAD(&entry->root_extent_list);
C
Chris Mason 已提交
212

213 214
	trace_btrfs_ordered_extent_add(inode, entry);

215
	spin_lock_irq(&tree->lock);
216 217
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
218 219
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
220
	spin_unlock_irq(&tree->lock);
C
Chris Mason 已提交
221

222 223 224 225 226
	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

C
Chris Mason 已提交
227 228 229
	return 0;
}

230 231 232 233
/* Add an ordinary (buffered, uncompressed) ordered extent. */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

/* Add an ordered extent for direct IO (sets BTRFS_ORDERED_DIRECT). */
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

/* Add an ordered extent whose data is compressed with @compress_type. */
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

255 256
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
257 258
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
259
 */
260 261 262
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
C
Chris Mason 已提交
263
{
264
	struct btrfs_ordered_inode_tree *tree;
C
Chris Mason 已提交
265

266
	tree = &BTRFS_I(inode)->ordered_tree;
267
	spin_lock_irq(&tree->lock);
268
	list_add_tail(&sum->list, &entry->list);
269
	spin_unlock_irq(&tree->lock);
C
Chris Mason 已提交
270 271
}

272 273 274 275 276 277 278 279 280 281 282 283 284 285
/*
 * This is used to account for finished IO across a given range of the
 * file.  The IO may span ordered extents.  If a given ordered_extent is
 * completely done, 1 is returned, otherwise 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered
 * extent.
 *
 * @file_offset is updated to one byte past the range that is recorded as
 * complete, which allows the caller to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct btrfs_ordered_extent *entry = NULL;
	struct rb_node *node;
	unsigned long flags;
	u64 dec_start;
	u64 dec_end;
	u64 to_dec;
	int ret;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	/* clamp the completed IO to this extent's boundaries */
	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	/* hand the caller a referenced pointer when the extent completed */
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

343 344 345 346 347 348 349 350 351
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
352
int btrfs_dec_test_ordered_pending(struct inode *inode,
353
				   struct btrfs_ordered_extent **cached,
354
				   u64 file_offset, u64 io_size, int uptodate)
C
Chris Mason 已提交
355
{
356
	struct btrfs_ordered_inode_tree *tree;
C
Chris Mason 已提交
357
	struct rb_node *node;
358
	struct btrfs_ordered_extent *entry = NULL;
359
	unsigned long flags;
360 361 362
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
363 364 365 366 367 368
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

369
	node = tree_search(tree, file_offset);
C
Chris Mason 已提交
370
	if (!node) {
371 372
		ret = 1;
		goto out;
C
Chris Mason 已提交
373 374
	}

375
	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
376
have_entry:
377 378 379
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
C
Chris Mason 已提交
380
	}
381

382 383 384 385 386 387
	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
388 389 390
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

391
	if (entry->bytes_left == 0)
392
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
393 394
	else
		ret = 1;
395
out:
396 397 398 399
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
400
	spin_unlock_irqrestore(&tree->lock, flags);
401 402
	return ret == 0;
}
C
Chris Mason 已提交
403

404 405 406 407
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
408
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
409
{
410 411 412
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

413 414
	trace_btrfs_ordered_extent_put(entry->inode, entry);

415
	if (atomic_dec_and_test(&entry->refs)) {
416 417
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
C
Chris Mason 已提交
418
		while (!list_empty(&entry->list)) {
419 420 421 422 423
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
424
		kfree(entry);
425
	}
C
Chris Mason 已提交
426
}
427

428 429
/*
 * remove an ordered extent from the tree.  No references are dropped
430
 * and waiters are woken up.
431
 */
432 433
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
434
{
435
	struct btrfs_ordered_inode_tree *tree;
436
	struct btrfs_root *root = BTRFS_I(inode)->root;
437 438
	struct rb_node *node;

439
	tree = &BTRFS_I(inode)->ordered_tree;
440
	spin_lock_irq(&tree->lock);
441
	node = &entry->rb_node;
442
	rb_erase(node, &tree->tree);
443 444
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
445
	spin_unlock_irq(&tree->lock);
446

447
	spin_lock(&root->fs_info->ordered_extent_lock);
448
	list_del_init(&entry->root_extent_list);
449

450 451
	trace_btrfs_ordered_extent_remove(inode, entry);

452 453 454 455 456 457 458 459 460
	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
461
	spin_unlock(&root->fs_info->ordered_extent_lock);
462
	wake_up(&entry->wait);
463 464
}

C
Chris Mason 已提交
465 466 467 468
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
469 470
void btrfs_wait_ordered_extents(struct btrfs_root *root,
				int nocow_only, int delay_iput)
471 472 473 474 475 476 477 478 479 480
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
481
	while (!list_empty(&splice)) {
482 483 484
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
485
		if (nocow_only &&
Y
Yan Zheng 已提交
486 487
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
488 489
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
490 491 492 493
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

494 495 496 497
		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
498
		 * the inode may be getting freed (in sys_unlink path).
499
		 */
500 501
		inode = igrab(ordered->inode);

502 503
		spin_unlock(&root->fs_info->ordered_extent_lock);

504 505 506
		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
Y
Yan, Zheng 已提交
507 508 509 510
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
511 512 513
		} else {
			btrfs_put_ordered_extent(ordered);
		}
514 515 516 517 518 519

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}

520 521 522 523 524 525 526 527 528 529
/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
530
void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
				   ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
Y
Yan, Zheng 已提交
567
			btrfs_add_delayed_iput(inode);
568 569 570 571 572 573 574 575 576 577 578 579
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

580 581 582 583 584 585 586 587 588 589
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				       struct btrfs_ordered_extent *entry,
				       int wait)
590 591 592
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;
593

594 595
	trace_btrfs_ordered_extent_start(inode, entry);

596 597 598 599 600
	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
601 602
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
C
Chris Mason 已提交
603
	if (wait) {
604 605
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
C
Chris Mason 已提交
606
	}
607
}
608

609 610 611
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
612
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
613 614
{
	u64 end;
615
	u64 orig_end;
616
	struct btrfs_ordered_extent *ordered;
617
	int found;
618 619

	if (start + len < start) {
620
		orig_end = INT_LIMIT(loff_t);
621 622
	} else {
		orig_end = start + len - 1;
623 624
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
625
	}
626

627 628 629
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
630
	filemap_write_and_wait_range(inode->i_mapping, start, orig_end);
631

632
	end = orig_end;
633
	found = 0;
C
Chris Mason 已提交
634
	while (1) {
635
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
C
Chris Mason 已提交
636
		if (!ordered)
637
			break;
638
		if (ordered->file_offset > orig_end) {
639 640 641 642 643 644 645
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
646
		found++;
647
		btrfs_start_ordered_extent(inode, ordered, 1);
648 649
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
650
		if (end == 0 || end == start)
651 652 653
			break;
		end--;
	}
654 655
}

656 657 658 659
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
660 661 662 663 664 665 666 667
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
668
	spin_lock_irq(&tree->lock);
669 670 671 672 673 674 675 676 677 678
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
679
	spin_unlock_irq(&tree->lock);
680 681 682
	return entry;
}

683 684 685 686 687 688 689 690 691 692 693 694
/*
 * Since the DIO code tries to lock a wide area we need to look for any
 * ordered extents that exist in the range, rather than just the start of
 * the range.  A reference is taken on the returned extent.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct btrfs_ordered_extent *entry = NULL;
	struct rb_node *node;

	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			/* walked past the end of the range with no overlap */
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

724 725 726 727
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
728
struct btrfs_ordered_extent *
C
Chris Mason 已提交
729
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
730 731 732 733 734 735
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
736
	spin_lock_irq(&tree->lock);
737 738 739 740 741 742 743
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
744
	spin_unlock_irq(&tree->lock);
745
	return entry;
746
}
747

748 749 750 751
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
752
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
753 754 755 756 757 758
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
759
	u64 i_size = i_size_read(inode);
760
	struct rb_node *node;
761
	struct rb_node *prev = NULL;
762
	struct btrfs_ordered_extent *test;
763 764 765 766
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
767 768
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
769

770
	spin_lock_irq(&tree->lock);
771 772
	disk_i_size = BTRFS_I(inode)->disk_i_size;

773 774 775 776 777 778 779
	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

780 781 782 783
	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
784
	if (disk_i_size == i_size || offset <= disk_i_size) {
785 786 787 788 789 790 791 792
		goto out;
	}

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
793 794 795 796 797 798 799 800 801 802 803 804 805 806 807
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct cover this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
808
	for (; node; node = rb_prev(node)) {
809
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
810 811 812 813

		/* We treat this entry as if it doesnt exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
814 815
		if (test->file_offset + test->len <= disk_i_size)
			break;
816
		if (test->file_offset >= i_size)
817 818 819 820
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
	}
821
	new_i_size = min_t(u64, offset, i_size);
822 823 824 825 826 827 828

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
829 830 831 832 833 834 835 836
	if (ordered) {
		node = rb_next(&ordered->rb_node);
	} else {
		if (prev)
			node = rb_next(prev);
		else
			node = rb_first(&tree->tree);
	}
837 838 839 840 841 842 843 844 845 846 847 848 849

	/*
	 * We are looking for an area between our current extent and the next
	 * ordered extent to update the i_size to.  There are 3 cases here
	 *
	 * 1) We don't actually have anything and we can update to i_size.
	 * 2) We have stuff but they already did their i_size update so again we
	 * can just update to i_size.
	 * 3) We have an outstanding ordered extent so the most we can update
	 * our disk_i_size to is the start of the next offset.
	 */
	i_size_test = i_size;
	for (; node; node = rb_next(node)) {
850
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
851 852 853 854

		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset > offset) {
Y
Yan Zheng 已提交
855
			i_size_test = test->file_offset;
856 857
			break;
		}
858 859 860 861
	}

	/*
	 * i_size_test is the end of a region after this ordered
862 863
	 * extent where there are no ordered extents, we can safely set
	 * disk_i_size to this.
864
	 */
865
	if (i_size_test > offset)
866
		new_i_size = min_t(u64, i_size_test, i_size);
867
	BTRFS_I(inode)->disk_i_size = new_i_size;
868
	ret = 0;
869
out:
870
	/*
871 872 873 874 875
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
876 877
	 */
	if (ordered)
878 879
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
880
	return ret;
881
}
882

883 884 885 886 887
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
888 889
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
890 891 892 893 894
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
895 896 897
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
898 899 900 901 902 903
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

904
	spin_lock_irq(&tree->lock);
Q
Qinghuang Feng 已提交
905
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
906
		if (disk_bytenr >= ordered_sum->bytenr) {
907
			num_sectors = ordered_sum->len / sectorsize;
908
			sector_sums = ordered_sum->sums;
909
			for (i = 0; i < num_sectors; i++) {
910
				if (sector_sums[i].bytenr == disk_bytenr) {
911 912 913 914 915
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
916 917 918
		}
	}
out:
919
	spin_unlock_irq(&tree->lock);
920
	btrfs_put_ordered_extent(ordered);
921 922 923
	return ret;
}

924

925 926 927 928 929 930 931 932 933 934 935 936
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
937 938
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
939 940 941 942 943 944 945 946 947 948
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
949
		return;
950 951 952 953 954 955 956

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
957
		return;
958 959 960 961 962 963 964 965 966
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}