// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled, we queue up these defrag structs to
 * remember which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

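/*
 * Defrag records are kept in an rbtree ordered by root objectid first
 * and inode number second; this comparator defines that order.
 */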
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/* insert a record for an inode into the defrag tree.  The caller
 * must already hold fs_info->defrag_inodes_lock.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}
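
/*
 * Auto defrag makes sense only while the autodefrag mount option is set
 * and the filesystem is not in the middle of being shut down.
 */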
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, this new inode
		 * doesn't have the IN_DEFRAG flag. In that case, we may
		 * find an existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defrag record for the requested (root, ino); if it doesn't
 * exist, we will get the next record in the tree.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

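/*
 * Empty the per-fs defrag tree, freeing every queued record without
 * running a defrag pass on the corresponding inodes.
 */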
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

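/*
 * How much of a file one auto defrag pass hands to btrfs_defrag_file();
 * if a pass consumes the whole batch, the inode is requeued so the rest
 * of the file is handled later.
 */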
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int ret;

	/* get the inode */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;
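
	/*
	 * (root_objectid, first_ino) is a cursor into the defrag tree: when
	 * no record is found past it, wrap around once to pick up records
	 * queued behind the cursor, then stop.
	 */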
	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here. There should be no need to mark the pages
		 * accessed as prepare_pages should have marked them accessed
		 * in prepare_pages via find_or_create_page()
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, cached);

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
						+ diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
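/*
 * If @drop_end is non-NULL it receives the end of the range that was
 * actually dropped.  When @replace_extent is set and enough leaf space is
 * available, an item of @extent_item_size bytes is inserted at @start for
 * the caller to fill in, and *key_inserted is set to 1.
 */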
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct btrfs_inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	struct inode *vfs_inode = &inode->vfs_inode;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= inode->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(vfs_inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(vfs_inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(vfs_inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset);
				ret = btrfs_free_extent(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(vfs_inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leafs, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key, &extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, last_end) : end;
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start,
				   end, NULL, drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}

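/*
 * Check whether the file extent item at @slot is a regular, uncompressed
 * extent backed by @bytenr whose range can be merged with [*start, *end);
 * on success the extent's own bounds are returned via *start and *end.
 */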
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * On error we return an unlocked page and the error value; on success we
 * return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
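		/*
		 * The page may have been invalidated while it was unlocked
		 * (e.g. by truncation or reclaim), so tell prepare_pages()
		 * to retry with a fresh page.
		 */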
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;

}

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other < 0 value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					last_pos, cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * It's possible the pages are dirty right now, but we don't want
	 * to clean them yet because copy_from_user may catch a page fault
	 * and we might have to fall back to one page at a time.  If that
	 * happens, we'll unlock these pages and we'd have a window where
	 * reclaim could sneak in and drop the once-dirty page on the floor
	 * without writing it.
	 *
	 * We have the pages locked and the extent range locked, so there's
	 * no way someone can start IO on any dirty pages in this range.
	 *
	 * We'll call btrfs_dirty_pages() later on, and that will flip around
	 * delalloc bits and dirty the pages as required.
	 */
	for (i = 0; i < num_pages; i++) {
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}

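/*
 * Decide whether a buffered write into [pos, pos + *write_bytes) can skip
 * COW; returns > 0 (shrinking *write_bytes if necessary) only when the
 * whole range is covered by a nocow-able extent.
 */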
static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		struct btrfs_ordered_extent *ordered;

		if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
			return -EAGAIN;

		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     num_bytes);
		if (ordered) {
			btrfs_put_ordered_extent(ordered);
			ret = -EAGAIN;
			goto out_unlock;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart,
						   lockend, NULL);
	}

	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			NULL, NULL, NULL, false);
	if (ret <= 0) {
		ret = 0;
		if (!nowait)
			btrfs_drew_write_unlock(&root->snapshot_lock);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}
out_unlock:
	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}

static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
			      size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, true);
}

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:	 File offset
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *		 range
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * >0		and update @write_bytes if we can do nocow write
 *  0		if we can't do nocow write
 * -EAGAIN	if we can't get the needed lock or there are ordered extents
 *		in the range (only possible in the (nowait == true) case)
 * <0		if other error happened
 *
 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, false);
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

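/*
 * The buffered write loop: reserve data/metadata space, prepare and lock
 * one batch of pages, copy from the iterator, then mark the copied range
 * dirty and delalloc, releasing whatever was reserved but not used.
 */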
1561 1562
static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					       struct iov_iter *i)
1563
{
1564 1565
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
A
Al Viro 已提交
1566
	struct inode *inode = file_inode(file);
1567
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1568
	struct page **pages = NULL;
1569
	struct extent_changeset *data_reserved = NULL;
1570
	u64 release_bytes = 0;
1571 1572
	u64 lockstart;
	u64 lockend;
J
Josef Bacik 已提交
1573 1574
	size_t num_written = 0;
	int nrptrs;
1575
	int ret = 0;
1576
	bool only_release_metadata = false;
1577
	bool force_page_uptodate = false;
1578

1579 1580
	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
			PAGE_SIZE / (sizeof(struct page *)));
1581 1582
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
1583
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
J
Josef Bacik 已提交
1584 1585
	if (!pages)
		return -ENOMEM;
1586

J
Josef Bacik 已提交
1587
	while (iov_iter_count(i) > 0) {
1588
		struct extent_state *cached_state = NULL;
1589
		size_t offset = offset_in_page(pos);
1590
		size_t sector_offset;
J
Josef Bacik 已提交
1591
		size_t write_bytes = min(iov_iter_count(i),
1592
					 nrptrs * (size_t)PAGE_SIZE -
1593
					 offset);
1594
		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1595
						PAGE_SIZE);
1596
		size_t reserve_bytes;
J
Josef Bacik 已提交
1597 1598
		size_t dirty_pages;
		size_t copied;
1599 1600
		size_t dirty_sectors;
		size_t num_sectors;
1601
		int extents_locked;
C
Chris Mason 已提交
1602

1603
		WARN_ON(num_pages > nrptrs);
1604

1605 1606 1607 1608
		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
J
Josef Bacik 已提交
1609
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1610
			ret = -EFAULT;
J
Josef Bacik 已提交
1611
			break;
1612 1613
		}

1614
		only_release_metadata = false;
1615
		sector_offset = pos & (fs_info->sectorsize - 1);
1616
		reserve_bytes = round_up(write_bytes + sector_offset,
1617
				fs_info->sectorsize);
1618

1619
		extent_changeset_release(data_reserved);
1620 1621
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
1622
						  write_bytes);
1623
		if (ret < 0) {
1624 1625
			if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
						   &write_bytes) > 0) {
1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638
				/*
				 * For nodata cow case, no need to reserve
				 * data space.
				 */
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_SIZE);
				reserve_bytes = round_up(write_bytes +
							 sector_offset,
1639
							 fs_info->sectorsize);
1640 1641 1642 1643
			} else {
				break;
			}
		}
1644

J
Josef Bacik 已提交
1645
		WARN_ON(reserve_bytes == 0);
1646 1647
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
				reserve_bytes);
1648 1649
		if (ret) {
			if (!only_release_metadata)
1650
				btrfs_free_reserved_data_space(BTRFS_I(inode),
1651 1652
						data_reserved, pos,
						write_bytes);
1653
			else
1654
				btrfs_check_nocow_unlock(BTRFS_I(inode));
1655 1656 1657 1658
			break;
		}

		release_bytes = reserve_bytes;
1659
again:
1660 1661 1662 1663 1664
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
1665 1666
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
1667
				    force_page_uptodate);
J
Josef Bacik 已提交
1668 1669
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
1670
						       reserve_bytes);
J
Josef Bacik 已提交
1671
			break;
J
Josef Bacik 已提交
1672
		}
C
Chris Mason 已提交
1673

1674 1675
		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
1676 1677
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
1678 1679
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
1680
				goto again;
J
Josef Bacik 已提交
1681
			btrfs_delalloc_release_extents(BTRFS_I(inode),
1682
						       reserve_bytes);
1683
			ret = extents_locked;
1684 1685 1686
			break;
		}

1687
		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1688

1689
		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1690
		dirty_sectors = round_up(copied + sector_offset,
1691 1692
					fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1693

1694 1695 1696 1697 1698 1699 1700
		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors <<
						fs_info->sb->s_blocksize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					fs_info->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
						dirty_pages, pos, copied,
						&cached_state);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos,
					       fs_info->sectorsize);
			lockend = round_up(pos + copied,
					   fs_info->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	return num_written ? num_written : ret;
}

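/*
 * Do a direct IO write. If the direct IO cannot write the whole iov (a short
 * write), fall back to a buffered write for the remainder, then flush and
 * wait on that range so a subsequent direct read sees what was just written.
 */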
static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t pos;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = btrfs_direct_IO(iocb, from);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec64_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

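/*
 * Entry point for write(2) and friends. Takes the inode lock, runs the
 * generic write checks, expands the file if the write starts beyond EOF,
 * and then dispatches to the direct or buffered write path.
 */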
static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
				    struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start_pos;
	u64 end_pos;
	ssize_t num_written = 0;
	const bool sync = iocb->ki_flags & IOCB_DSYNC;
	ssize_t err;
	loff_t pos;
	size_t count;
	loff_t oldsize;
	int clean_page = 0;

	if (!(iocb->ki_flags & IOCB_DIRECT) &&
	    (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		inode_unlock(inode);
		return err;
	}

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	if (iocb->ki_flags & IOCB_NOWAIT) {
		size_t nocow_bytes = count;

		/*
		 * We will allocate space in case nodatacow is not set,
		 * so bail out.
		 */
		if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes)
		    <= 0) {
			inode_unlock(inode);
			return -EAGAIN;
		}
		/*
		 * There are holes in the range or parts of the range that must
		 * be COWed (shared extents, RO block groups, etc), so just bail
		 * out.
		 */
		if (nocow_bytes < count) {
			inode_unlock(inode);
			return -EAGAIN;
		}
	}

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err) {
		inode_unlock(inode);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		inode_unlock(inode);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there.  We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		end_pos = round_up(pos + count,
				   fs_info->sectorsize);
		err = btrfs_cont_expand(inode, oldsize, end_pos);
		if (err) {
			inode_unlock(inode);
			goto out;
		}
		if (start_pos > round_up(oldsize, fs_info->sectorsize))
			clean_page = 1;
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * 1. We must always clear IOCB_DSYNC in order to not deadlock
		 *    in iomap, as it calls generic_write_sync() in this case.
		 * 2. If we are async, we can call iomap_dio_complete() either
		 *    in
		 *
		 *    2.1. A worker thread from the last bio completed.  In this
		 *	   case we need to mark the btrfs_dio_data that it is
		 *	   async in order to call generic_write_sync() properly.
		 *	   This is handled by setting BTRFS_DIO_SYNC_STUB in the
		 *	   current->journal_info.
		 *    2.2  The submitter context, because all IO completed
		 *         before we exited iomap_dio_rw().  In this case we can
		 *         just re-set the IOCB_DSYNC on the iocb and we'll do
		 *         the sync below.  If our ->end_io() gets called and
		 *         current->journal_info is set, then we know we're in
		 *         our current context and we will clear
		 *         current->journal_info to indicate that we need to
		 *         sync below.
		 */
		if (sync) {
			ASSERT(current->journal_info == NULL);
			iocb->ki_flags &= ~IOCB_DSYNC;
			current->journal_info = BTRFS_DIO_SYNC_STUB;
		}
		num_written = __btrfs_direct_write(iocb, from);

		/*
		 * As stated above, we cleared journal_info, so we need to do
		 * the sync ourselves.
		 */
		if (sync && current->journal_info == NULL)
			iocb->ki_flags |= IOCB_DSYNC;
		current->journal_info = NULL;
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		if (num_written > 0)
			iocb->ki_pos = pos + num_written;
		if (clean_page)
			pagecache_isize_extended(inode, oldsize,
						i_size_read(inode));
	}

	inode_unlock(inode);

	/*
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	spin_unlock(&BTRFS_I(inode)->lock);
	if (num_written > 0)
		num_written = generic_write_sync(iocb, num_written);

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private && private->filldir_buf)
		kfree(private->filldir_buf);
	kfree(private);
	filp->private_data = NULL;

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size.  This tries to flush down new bytes that may
	 * have been written if the application were using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

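/*
 * Start writeback for all dirty pages in the given range, which creates the
 * ordered extents; waiting for them to complete is left to the caller.
 */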
static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = btrfs_fdatawrite_range(inode, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	blk_finish_plug(&plug);

	return ret;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;
	u64 len;
	bool full_sync;

	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * Always set the range to a full range, otherwise we can get into
	 * several problems, from missing file extent items to represent holes
	 * when not using the NO_HOLES feature, to log tree corruption due to
	 * races between hole detection during logging and completion of ordered
	 * extents outside the range, to missing checksums due to ordered extents
	 * for which we flushed only a subset of their pages.
	 */
	start = 0;
	end = LLONG_MAX;
	len = (u64)LLONG_MAX + 1;

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so multiple tasks can flush dirty pages
	 * concurrently, which improves performance.  See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret)
		goto out;

	inode_lock(inode);

	/*
	 * We take the dio_sem here because the tree log stuff can race with
	 * lockless dio writes and get an extent map logged for an extent we
	 * never waited on.  We need it this high up for lockdep reasons.
	 */
	down_write(&BTRFS_I(inode)->dio_sem);

	atomic_inc(&root->log_batch);

	/*
	 * Always check for the full sync flag while holding the inode's lock,
	 * to avoid races with other tasks. The flag must be either set all the
	 * time during logging or always off all the time while logging.
	 */
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);

	/*
	 * Before we acquired the inode's lock, someone may have dirtied more
	 * pages in the target range. We need to make sure that writeback for
	 * any such pages does not start while we are logging the inode, because
	 * if it does, any of the following might happen when we are not doing a
	 * full inode sync:
	 *
	 * 1) We log an extent after its writeback finishes but before its
	 *    checksums are added to the csum tree, leading to -EIO errors
	 *    when attempting to read the extent after a log replay.
	 *
	 * 2) We can end up logging an extent before its writeback finishes.
	 *    Therefore after the log replay we will have a file extent item
	 *    pointing to an unwritten extent (and no data checksums as well).
	 *
	 * So trigger writeback for any eventual new dirty pages and then we
	 * wait for all ordered extents to complete below.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret) {
		up_write(&BTRFS_I(inode)->dio_sem);
		inode_unlock(inode);
		goto out;
	}

	/*
	 * We have to do this here to avoid the priority inversion of waiting on
	 * IO of a lower priority task while holding a transaction open.
	 *
	 * For a full fsync we wait for the ordered extents to complete while
	 * for a fast fsync we wait just for writeback to complete, and then
	 * attach the ordered extents to the transaction so that a transaction
	 * commit waits for their completion, to avoid data loss if we fsync,
	 * the current transaction commits before the ordered extents complete
	 * and a power failure happens right after that.
	 */
	if (full_sync) {
		ret = btrfs_wait_ordered_range(inode, start, len);
	} else {
		/*
		 * Get our ordered extents as soon as possible to avoid doing
		 * checksum lookups in the csum tree, and use instead the
		 * checksums attached to the ordered extents.
		 */
		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
						      &ctx.ordered_extents);
		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
	}

	if (ret)
		goto out_release_extents;

	atomic_inc(&root->log_batch);

	/*
	 * If we are doing a fast fsync we cannot bail out if the inode's
	 * last_trans is <= the last committed transaction, because we only
	 * update the last_trans of the inode during ordered extent completion,
	 * and for a fast fsync we don't wait for that, we only wait for the
	 * writeback to complete.
	 */
	smp_mb();
	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
	    (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
	     (full_sync || list_empty(&ctx.ordered_extents)))) {
		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		/*
		 * An ordered extent might have started before and completed
		 * already with io errors, in which case the inode was not
		 * updated and we end up here. So check the inode's mapping
		 * for any errors that might have happened since we last
		 * called fsync.
		 */
		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
		goto out_release_extents;
	}

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path).  If we use join
	 * here we could get into a situation where we're waiting on IO to
	 * happen that is blocked on a transaction trying to commit.  With start
	 * we inc the extwriter counter, so we wait for all extwriters to exit
	 * before we start blocking joiners.  This comment is to keep somebody
	 * from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release_extents;
	}

	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
	btrfs_release_log_ctx_extents(&ctx);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = 1;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	up_write(&BTRFS_I(inode)->dio_sem);
	inode_unlock(inode);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root, &ctx);
			if (!ret) {
				ret = btrfs_end_transaction(trans);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start, len);
			if (ret) {
				btrfs_end_transaction(trans);
				goto out;
			}
		}
		ret = btrfs_commit_transaction(trans);
	} else {
		ret = btrfs_end_transaction(trans);
	}
out:
	ASSERT(list_empty(&ctx.list));
	err = file_check_and_advance_wb_err(file);
	if (!ret)
		ret = err;
	return ret > 0 ? -EIO : ret;

out_release_extents:
	btrfs_release_log_ctx_extents(&ctx);
	up_write(&BTRFS_I(inode)->dio_sem);
	inode_unlock(inode);
	goto out;
}

2289
static const struct vm_operations_struct btrfs_file_vm_ops = {
2290
	.fault		= filemap_fault,
2291
	.map_pages	= filemap_map_pages,
C
Chris Mason 已提交
2292 2293 2294 2295 2296
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
{
M
Miao Xie 已提交
2297 2298 2299 2300 2301
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

C
Chris Mason 已提交
2302
	file_accessed(filp);
M
Miao Xie 已提交
2303 2304
	vma->vm_ops = &btrfs_file_vm_ops;

C
Chris Mason 已提交
2305 2306 2307
	return 0;
}

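/*
 * mmap support: faults are served by the generic filemap helpers, while
 * btrfs_page_mkwrite reserves space and dirties the page before a shared
 * mapping is allowed to write to it.
 */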
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

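/*
 * Check whether the file extent item at @slot is a hole that can be merged
 * with a new hole covering [start, end).
 */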
static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}

/*
 * Insert a file extent item representing the hole [offset, end) after the
 * extents in that range were dropped, merging with an adjacent hole extent
 * item when possible, and keep the in-memory extent map tree in sync.
 */
static int fill_holes(struct btrfs_trans_handle *trans,
		struct btrfs_inode *inode,
		struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret <= 0) {
		/*
		 * We should have dropped this offset, so if we find it then
		 * something has gone horribly wrong.
		 */
		if (ret == 0)
			ret = -EINVAL;
		return ret;
	}

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
		u64 num_bytes;

		key.offset = offset;
		btrfs_set_item_key_safe(fs_info, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&inode->runtime_flags);
	}

	return 0;
}

/*
 * Find a hole extent on the given inode and change start/len to the end of
 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
 * em->start + em->len > start).
 * When a hole extent is found, return 1 and modify start/len.
 */
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	int ret = 0;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
			      round_down(*start, fs_info->sectorsize),
			      round_up(*len, fs_info->sectorsize));
	if (IS_ERR(em))
		return PTR_ERR(em);

	/* Hole or vacuum extent(only exists in no-hole mode) */
	if (em->block_start == EXTENT_MAP_HOLE) {
		ret = 1;
		*len = em->start + em->len > *start + *len ?
		       0 : *start + *len - em->start - em->len;
		*start = em->start + em->len;
	}
	free_extent_map(em);
	return ret;
}

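/*
 * Lock the extent range for a hole punching style operation, looping until
 * there are no ordered extents left in the range and no pages have been
 * read back into the page cache after we truncated it.
 */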
static int btrfs_punch_hole_lock_range(struct inode *inode,
				       const u64 lockstart,
				       const u64 lockend,
				       struct extent_state **cached_state)
{
	while (1) {
		struct btrfs_ordered_extent *ordered;
		int ret;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 cached_state);
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
							    lockend);

		/*
		 * We need to make sure we have no ordered extents in this
		 * range and that nobody raced in and read a page in this
		 * range. If they did, we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->num_bytes <= lockstart ||
		     ordered->file_offset > lockend)) &&
		     !filemap_range_has_page(inode->i_mapping,
					     lockstart, lockend)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, cached_state);
		ret = btrfs_wait_ordered_range(inode, lockstart,
					       lockend - lockstart + 1);
		if (ret)
			return ret;
	}
	return 0;
}

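/*
 * Insert the file extent item described by @extent_info at its file_offset,
 * covering @replace_len bytes, and account for it: either as a freshly
 * allocated (reserved) extent or by adding a reference to an existing one.
 */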
static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     struct btrfs_path *path,
				     struct btrfs_replace_extent_info *extent_info,
				     const u64 replace_len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int slot;
	struct btrfs_ref ref = { 0 };
	int ret;

	if (replace_len == 0)
		return 0;

	if (extent_info->disk_offset == 0 &&
	    btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = extent_info->file_offset;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_file_extent_item));
	if (ret)
		return ret;
	leaf = path->nodes[0];
	slot = path->slots[0];
	write_extent_buffer(leaf, extent_info->extent_buf,
			    btrfs_item_ptr_offset(leaf, slot),
			    sizeof(struct btrfs_file_extent_item));
	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
	if (extent_info->is_new_extent)
		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
			extent_info->file_offset, replace_len);
	if (ret)
		return ret;

	/* If it's a hole, nothing more needs to be done. */
	if (extent_info->disk_offset == 0)
		return 0;

	inode_add_bytes(inode, replace_len);

	if (extent_info->is_new_extent && extent_info->insertions == 0) {
		key.objectid = extent_info->disk_offset;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = extent_info->disk_len;
		ret = btrfs_alloc_reserved_file_extent(trans, root,
						       btrfs_ino(BTRFS_I(inode)),
						       extent_info->file_offset,
						       extent_info->qgroup_reserved,
						       &key);
	} else {
		u64 ref_offset;

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
				       extent_info->disk_offset,
				       extent_info->disk_len, 0);
		ref_offset = extent_info->file_offset - extent_info->data_offset;
		btrfs_init_data_ref(&ref, root->root_key.objectid,
				    btrfs_ino(BTRFS_I(inode)), ref_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
	}

	extent_info->insertions++;

	return ret;
}

/*
 * The respective range must have been previously locked, as well as the inode.
 * The end offset is inclusive (last byte of the range).
 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
 * the file range with an extent.
 * When not punching a hole, we don't want to end up in a state where we dropped
 * extents without inserting a new one, so we must abort the transaction to avoid
 * corruption.
 */
int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
			   const u64 start, const u64 end,
			   struct btrfs_replace_extent_info *extent_info,
			   struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_block_rsv *rsv;
	unsigned int rsv_count;
	u64 cur_offset;
	u64 drop_end;
	u64 len = end - start;
	int ret = 0;

	if (end <= start)
		return -EINVAL;

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out;
	}
	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent if no_holes isn't set or if we are
	 *     replacing the range with a new extent
	 */
	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
		rsv_count = 3;
	else
		rsv_count = 2;

	trans = btrfs_start_transaction(root, rsv_count);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	cur_offset = start;
	while (cur_offset < end) {
		ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path,
					   cur_offset, end + 1, &drop_end,
					   1, 0, 0, NULL);
		if (ret != -ENOSPC) {
			/*
			 * When cloning we want to avoid transaction aborts when
			 * nothing was done and we are attempting to clone parts
			 * of inline extents, in such cases -EOPNOTSUPP is
			 * returned by __btrfs_drop_extents() without having
			 * changed anything in the file.
			 */
			if (extent_info && !extent_info->is_new_extent &&
			    ret && ret != -EOPNOTSUPP)
				btrfs_abort_transaction(trans, ret);
			break;
		}

		trans->block_rsv = &fs_info->trans_block_rsv;

		if (!extent_info && cur_offset < drop_end &&
		    cur_offset < ino_size) {
			ret = fill_holes(trans, BTRFS_I(inode), path,
					cur_offset, drop_end);
			if (ret) {
				/*
				 * If we failed then we didn't insert our hole
				 * entries for the area we dropped, so now the
				 * fs is corrupted; we must abort the
				 * transaction.
				 */
				btrfs_abort_transaction(trans, ret);
				break;
			}
		} else if (!extent_info && cur_offset < drop_end) {
			/*
			 * We are past the i_size here, but since we didn't
			 * insert holes we need to clear the mapped area so we
			 * know to not set disk_i_size in this area until a new
			 * file extent is inserted here.
			 */
			ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
					cur_offset, drop_end - cur_offset);
			if (ret) {
				/*
				 * We couldn't clear our area, so we could
				 * presumably adjust up and corrupt the fs, so
				 * we need to abort.
				 */
				btrfs_abort_transaction(trans, ret);
				break;
			}
		}

		if (extent_info && drop_end > extent_info->file_offset) {
			u64 replace_len = drop_end - extent_info->file_offset;

			ret = btrfs_insert_replace_extent(trans, inode, path,
							extent_info, replace_len);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			extent_info->data_len -= replace_len;
			extent_info->data_offset += replace_len;
			extent_info->file_offset += replace_len;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, rsv_count);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;

		if (!extent_info) {
			ret = find_first_non_hole(inode, &cur_offset, &len);
			if (unlikely(ret < 0))
				break;
			if (ret && !len) {
				ret = 0;
				break;
			}
		}
	}

	/*
	 * If we were cloning, force the next fsync to be a full one since we
	 * replaced (or just dropped in the case of cloning holes when
	 * NO_HOLES is enabled) extents and extent maps.
	 * This is for the sake of simplicity, and cloning into files larger
	 * than 16Mb would force the full fsync any way (when
	 * try_release_extent_mapping() is invoked during page cache truncation).
	 */
	if (extent_info && !extent_info->is_new_extent)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	if (ret)
		goto out_trans;

	trans->block_rsv = &fs_info->trans_block_rsv;
	/*
	 * If we are using the NO_HOLES feature we might already have had a
	 * hole that overlaps a part of the region [lockstart, lockend] and
	 * ends at (or beyond) lockend. Since we have no file extent items to
	 * represent holes, drop_end can be less than lockend and so we must
	 * make sure we have an extent map representing the existing hole (the
	 * call to __btrfs_drop_extents() might have dropped the existing extent
	 * map representing the existing hole), otherwise the fast fsync path
	 * will not record the existence of the hole region
	 * [existing_hole_start, lockend].
	 */
	if (drop_end <= end)
		drop_end = end + 1;
	/*
	 * Don't insert file hole extent item if it's for a range beyond eof
	 * (because it's useless) or if it represents a 0 bytes range (when
	 * cur_offset == drop_end).
	 */
	if (!extent_info && cur_offset < ino_size && cur_offset < drop_end) {
		ret = fill_holes(trans, BTRFS_I(inode), path,
				cur_offset, drop_end);
		if (ret) {
			/* Same comment as above. */
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	} else if (!extent_info && cur_offset < drop_end) {
		/* See the comment in the loop above for the reasoning here. */
		ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
					cur_offset, drop_end - cur_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	}
	if (extent_info) {
		ret = btrfs_insert_replace_extent(trans, inode, path, extent_info,
						extent_info->data_len);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	}

out_trans:
	if (!trans)
		goto out_free;

	trans->block_rsv = &fs_info->trans_block_rsv;
	if (ret)
		btrfs_end_transaction(trans);
	else
		*trans_out = trans;
out_free:
	btrfs_free_block_rsv(fs_info, rsv);
out:
	return ret;
}

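/*
 * Punch a hole in the given range: zero the unaligned head and tail blocks
 * in place and replace the fully aligned middle of the range with a hole,
 * dropping the file extent items there.
 */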
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	u64 lockstart;
	u64 lockend;
	u64 tail_start;
	u64 tail_len;
	u64 orig_start = offset;
	int ret = 0;
	bool same_block;
	u64 ino_size;
	bool truncated_block = false;
	bool updated_inode = false;

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	inode_lock(inode);
	ino_size = round_up(inode->i_size, fs_info->sectorsize);
	ret = find_first_non_hole(inode, &offset, &len);
	if (ret < 0)
		goto out_only_mutex;
	if (ret && !len) {
		/* Already in a large hole */
		ret = 0;
		goto out_only_mutex;
	}

	lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
	lockend = round_down(offset + len,
			     btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
	/*
	 * We needn't truncate any block which is beyond the end of the file
	 * because we are sure there is no data there.
	 */
	/*
	 * Only do this if we are in the same block and we aren't doing the
	 * entire block.
	 */
	if (same_block && len < fs_info->sectorsize) {
		if (offset < ino_size) {
			truncated_block = true;
			ret = btrfs_truncate_block(inode, offset, len, 0);
		} else {
			ret = 0;
		}
		goto out_only_mutex;
	}

	/* zero back part of the first block */
	if (offset < ino_size) {
		truncated_block = true;
		ret = btrfs_truncate_block(inode, offset, 0, 0);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	/*
	 * Check the aligned pages after the first unaligned page if
	 * offset == orig_start. Otherwise the first unaligned page and
	 * several following pages are already in holes, so the extra
	 * check can be skipped.
	 */
	if (offset == orig_start) {
		/* after truncate page, check hole again */
		len = offset + len - lockstart;
		offset = lockstart;
		ret = find_first_non_hole(inode, &offset, &len);
		if (ret < 0)
			goto out_only_mutex;
		if (ret && !len) {
			ret = 0;
			goto out_only_mutex;
		}
		lockstart = offset;
	}

	/* Check the tail unaligned part is in a hole */
	tail_start = lockend + 1;
	tail_len = offset + len - tail_start;
	if (tail_len) {
		ret = find_first_non_hole(inode, &tail_start, &tail_len);
		if (unlikely(ret < 0))
			goto out_only_mutex;
		if (!ret) {
			/* zero the front end of the last page */
			if (tail_start + tail_len < ino_size) {
				truncated_block = true;
				ret = btrfs_truncate_block(inode,
							tail_start + tail_len,
							0, 1);
				if (ret)
					goto out_only_mutex;
			}
		}
	}

	if (lockend < lockstart) {
		ret = 0;
		goto out_only_mutex;
	}

	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
					  &cached_state);
	if (ret)
		goto out_only_mutex;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_replace_file_extents(inode, path, lockstart, lockend, NULL,
				     &trans);
	btrfs_free_path(path);
	if (ret)
		goto out;

	ASSERT(trans != NULL);
	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);
	updated_inode = true;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
out_only_mutex:
	if (!updated_inode && truncated_block && !ret) {
		/*
		 * If we only end up zeroing part of a page, we still need to
		 * update the inode item, so that all the time fields are
		 * updated as well as the necessary btrfs inode in memory fields
		 * for detecting, at fsync time, if the inode isn't yet in the
		 * log tree or it's there but not up to date.
		 */
		struct timespec64 now = current_time(inode);

		inode_inc_iversion(inode);
		inode->i_mtime = now;
		inode->i_ctime = now;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			int ret2;

			ret = btrfs_update_inode(trans, root, inode);
			ret2 = btrfs_end_transaction(trans);
			if (!ret)
				ret = ret2;
		}
	}
	inode_unlock(inode);
	return ret;
}

/* Helper structure to record which range is already reserved */
struct falloc_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Helper function to add a falloc range
 *
 * Caller should have locked the larger range of extent containing
 * [start, len)
 */
static int add_falloc_range(struct list_head *head, u64 start, u64 len)
{
	struct falloc_range *prev = NULL;
	struct falloc_range *range = NULL;

	if (list_empty(head))
		goto insert;

	/*
	 * As fallocate iterates by bytenr order, we only need to check
	 * the last range.
	 */
	prev = list_entry(head->prev, struct falloc_range, list);
	if (prev->start + prev->len == start) {
		prev->len += len;
		return 0;
	}
insert:
	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;
	range->start = start;
	range->len = len;
	list_add_tail(&range->list, head);
	return 0;
}

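/*
 * Grow i_size (and the on-disk inode item) after an fallocate that extends
 * the file, unless FALLOC_FL_KEEP_SIZE was requested or the file is already
 * big enough.
 */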
static int btrfs_fallocate_update_isize(struct inode *inode,
					const u64 end,
					const int mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	int ret2;

	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
		return 0;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	inode->i_ctime = current_time(inode);
	i_size_write(inode, end);
	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode(trans, root, inode);
	ret2 = btrfs_end_transaction(trans);

	return ret ? ret : ret2;
}

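/*
 * Classification of the block backing a given file offset, used by
 * btrfs_zero_range() to decide how to treat the unaligned boundaries of
 * the range: zero them in place or widen the allocation to cover them.
 */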
enum {
	RANGE_BOUNDARY_WRITTEN_EXTENT,
	RANGE_BOUNDARY_PREALLOC_EXTENT,
	RANGE_BOUNDARY_HOLE,
};

static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
						 u64 offset)
{
	const u64 sectorsize = btrfs_inode_sectorsize(inode);
	struct extent_map *em;
	int ret;

	offset = round_down(offset, sectorsize);
	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
	if (IS_ERR(em))
		return PTR_ERR(em);

	if (em->block_start == EXTENT_MAP_HOLE)
		ret = RANGE_BOUNDARY_HOLE;
	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
	else
		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;

	free_extent_map(em);
	return ret;
}

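/*
 * Implements FALLOC_FL_ZERO_RANGE: zero out the unaligned boundaries of the
 * range in place and turn the aligned middle into a preallocated (unwritten)
 * extent, reusing existing prealloc extents when possible.
 */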
static int btrfs_zero_range(struct inode *inode,
			    loff_t offset,
			    loff_t len,
			    const int mode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct extent_map *em;
	struct extent_changeset *data_reserved = NULL;
	int ret;
	u64 alloc_hint = 0;
	const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
	u64 alloc_start = round_down(offset, sectorsize);
	u64 alloc_end = round_up(offset + len, sectorsize);
	u64 bytes_to_reserve = 0;
	bool space_reserved = false;

	inode_dio_wait(inode);

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
			      alloc_end - alloc_start);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	/*
	 * Avoid hole punching and extent allocation for some cases. More cases
	 * could be considered, but these are unlikely common and we keep things
	 * as simple as possible for now. Also, intentionally, if the target
	 * range contains one or more prealloc extents together with regular
	 * extents and holes, we drop all the existing extents and allocate a
	 * new prealloc extent, so that we get a larger contiguous disk extent.
	 */
	if (em->start <= alloc_start &&
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		const u64 em_end = em->start + em->len;

		if (em_end >= offset + len) {
			/*
			 * The whole range is already a prealloc extent,
			 * do nothing except updating the inode's i_size if
			 * needed.
			 */
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		/*
		 * Part of the range is already a prealloc extent, so operate
		 * only on the remaining part of the range.
		 */
		alloc_start = em_end;
		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
		len = offset + len - alloc_start;
		offset = alloc_start;
		alloc_hint = em->block_start + em->len;
	}
	free_extent_map(em);

	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
				      sectorsize);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
			free_extent_map(em);
			ret = btrfs_truncate_block(inode, offset, len, 0);
			if (!ret)
				ret = btrfs_fallocate_update_isize(inode,
								   offset + len,
								   mode);
			return ret;
		}
		free_extent_map(em);
		alloc_start = round_down(offset, sectorsize);
		alloc_end = alloc_start + sectorsize;
		goto reserve_space;
	}

	alloc_start = round_up(offset, sectorsize);
	alloc_end = round_down(offset + len, sectorsize);

	/*
	 * For unaligned ranges, check the pages at the boundaries, they might
	 * map to an extent, in which case we need to partially zero them, or
	 * they might map to a hole, in which case we need our allocation range
	 * to cover them.
	 */
	if (!IS_ALIGNED(offset, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
							    offset);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_start = round_down(offset, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset, 0, 0);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

	if (!IS_ALIGNED(offset + len, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
							    offset + len);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_end = round_up(offset + len, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

reserve_space:
	if (alloc_start < alloc_end) {
		struct extent_state *cached_state = NULL;
		const u64 lockstart = alloc_start;
		const u64 lockend = alloc_end - 1;

		bytes_to_reserve = alloc_end - alloc_start;
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      bytes_to_reserve);
		if (ret < 0)
			goto out;
		space_reserved = true;
		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
						  &cached_state);
		if (ret)
			goto out;
		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
						alloc_start, bytes_to_reserve);
		if (ret)
			goto out;
		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
						alloc_end - alloc_start,
						i_blocksize(inode),
						offset + len, &alloc_hint);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state);
		/* btrfs_prealloc_file_range releases reserved space on error */
		if (ret) {
			space_reserved = false;
			goto out;
		}
	}
	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
 out:
	if (ret && space_reserved)
		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
					       alloc_start, bytes_to_reserve);
	extent_changeset_free(data_reserved);

	return ret;
}

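/*
 * Entry point for fallocate(2). Hole punching and zero range are handled
 * by the helpers above; the code below deals with plain preallocation,
 * reserving space and creating prealloc extents for the holes in the range.
 */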
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct falloc_range *range;
	struct falloc_range *tmp;
	struct list_head reserve_list;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 actual_end = 0;
	struct extent_map *em;
	int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);
	cur_offset = alloc_start;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Only trigger disk allocation, don't trigger qgroup reserve
	 *
	 * For qgroup space, it will be checked later.
	 */
	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      alloc_end - alloc_start);
		if (ret < 0)
			return ret;
	}

	inode_lock(inode);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out;
	}

	/*
	 * TODO: Move these two operations after we have checked
	 * accurate reserved space, or fallocate can still fail but
	 * with the page truncated or the size expanded.
	 *
	 * But that's a minor problem and won't do much harm BTW.
	 */
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else if (offset + len > inode->i_size) {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the block if i_size lands in the
		 * middle of a block.
		 */
		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

3347 3348 3349 3350 3351 3352
	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = btrfs_zero_range(inode, offset, len, mode);
		inode_unlock(inode);
		return ret;
	}

3353 3354 3355 3356 3357 3358 3359 3360
	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
							    locked_end);

		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						       alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}
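
	/*
	 * At this point the range [alloc_start, locked_end] is locked and
	 * free of ordered extents, so the reservation decisions below see
	 * a stable view of the file.
	 */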

	/* First, check if we exceed the qgroup limit */
	INIT_LIST_HEAD(&reserve_list);
	while (cur_offset < alloc_end) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
				      alloc_end - cur_offset);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = add_falloc_range(&reserve_list, cur_offset,
					       last_byte - cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
					&data_reserved, cur_offset,
					last_byte - cur_offset);
			if (ret < 0) {
				cur_offset = last_byte;
				free_extent_map(em);
				break;
			}
		} else {
			/*
			 * No need to reserve an unwritten extent for this
			 * range; free the reserved data space first,
			 * otherwise it would cause a false ENOSPC error.
			 */
			btrfs_free_reserved_data_space(BTRFS_I(inode),
				data_reserved, cur_offset,
				last_byte - cur_offset);
		}
		free_extent_map(em);
		cur_offset = last_byte;
	}

	/*
	 * If ret is still 0, we are OK to fallocate; otherwise just
	 * clean up the list and exit.
	 */
	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
		if (!ret)
			ret = btrfs_prealloc_file_range(inode, mode,
					range->start,
					range->len, i_blocksize(inode),
					offset + len, &alloc_hint);
		else
			btrfs_free_reserved_data_space(BTRFS_I(inode),
					data_reserved, range->start,
					range->len);
		list_del(&range->list);
		kfree(range);
	}
	if (ret < 0)
		goto out_unlock;

	/*
	 * We didn't need to allocate any more space, but we still extended the
	 * size of the file so we need to update i_size and the inode item.
	 */
	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state);
out:
	inode_unlock(inode);
	/* Let go of our reservation. */
	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
				cur_offset, alloc_end - cur_offset);
	extent_changeset_free(data_reserved);
	return ret;
}
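
/*
 * For reference: btrfs_fallocate() is reached from userspace through the
 * fallocate(2) syscall. A minimal usage sketch of the supported modes
 * (hypothetical fd, sizes chosen only for illustration):
 *
 *	fallocate(fd, 0, 0, 1 << 20);                    // preallocate 1 MiB
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);  // same, keep i_size
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, 0, 4096);     // punch a hole
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 4096);    // zero the range
 *
 * PUNCH_HOLE must be combined with KEEP_SIZE (the VFS enforces this);
 * any other flags are rejected above with -EOPNOTSUPP.
 */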

static loff_t find_desired_extent(struct inode *inode, loff_t offset,
				  int whence)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	loff_t i_size = inode->i_size;
	u64 lockstart;
	u64 lockend;
	u64 start;
	u64 len;
	int ret = 0;

	if (i_size == 0 || offset >= i_size)
		return -ENXIO;

	/*
	 * offset can be negative; in that case we start looking for
	 * DATA/HOLE from the very start of the file.
	 */
	start = max_t(loff_t, 0, offset);

	lockstart = round_down(start, fs_info->sectorsize);
	lockend = round_up(i_size, fs_info->sectorsize);
	if (lockend <= lockstart)
		lockend = lockstart + fs_info->sectorsize;
	lockend--;
	len = lockend - lockstart + 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 &cached_state);

	while (start < i_size) {
		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			   (em->block_start != EXTENT_MAP_HOLE &&
			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
	if (ret) {
		offset = ret;
	} else {
		if (whence == SEEK_DATA && start >= i_size)
			offset = -ENXIO;
		else
			offset = min_t(loff_t, start, i_size);
	}

	return offset;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = find_desired_extent(inode, offset, whence);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
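
/*
 * A userspace sketch of the SEEK_DATA/SEEK_HOLE path above (hypothetical
 * fd). Both calls fail with errno == ENXIO when the offset is at or past
 * EOF, matching the -ENXIO returns in find_desired_extent():
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);    // first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // next hole after that
 */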

static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return generic_file_open(inode, filp);
}
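
/*
 * FMODE_NOWAIT set in btrfs_file_open() is what allows callers to pass
 * RWF_NOWAIT; a sketch (hypothetical fd and buffer):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
 *	// n == -1 with errno == EAGAIN if the write would have blocked
 *
 * FMODE_BUF_RASYNC likewise advertises support for async buffered reads
 * (used by io_uring).
 */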

static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct inode *inode = file_inode(iocb->ki_filp);

		inode_lock_shared(inode);
		ret = btrfs_direct_IO(iocb, to);
		inode_unlock_shared(inode);
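
		/*
		 * If the direct read failed, consumed the whole iter, or
		 * hit EOF, we are done; otherwise fall through and let
		 * the buffered path read the remainder, passing in the
		 * bytes already read.
		 */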
		if (ret < 0 || !iov_iter_count(to) ||
		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
			return ret;
	}

	return generic_file_buffered_read(iocb, to, ret);
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter      = btrfs_file_read_iter,
	.splice_read	= generic_file_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.splice_write	= iter_file_splice_write,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.remap_file_range = btrfs_remap_file_range,
};

void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}
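
/*
 * This init is expected to be paired with btrfs_auto_defrag_exit() on
 * module unload so the cache above is destroyed; kmem_cache_destroy()
 * accepts NULL, so running the exit path after a failed init is safe.
 */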

int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

	return ret;
}
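
/*
 * Typical use pairs the write-out above with a wait on the same range,
 * e.g. a sketch of a caller flushing then waiting:
 *
 *	ret = btrfs_fdatawrite_range(inode, start, end);
 *	if (!ret)
 *		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
 */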