inode.c 195.5 KB
Newer Older
C
Chris Mason 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

19
#include <linux/kernel.h>
20
#include <linux/bio.h>
C
Chris Mason 已提交
21
#include <linux/buffer_head.h>
S
Sage Weil 已提交
22
#include <linux/file.h>
C
Chris Mason 已提交
23 24 25 26 27 28 29 30 31 32 33 34
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
35
#include <linux/bit_spinlock.h>
J
Josef Bacik 已提交
36
#include <linux/xattr.h>
J
Josef Bacik 已提交
37
#include <linux/posix_acl.h>
38
#include <linux/falloc.h>
39
#include <linux/slab.h>
40
#include <linux/ratelimit.h>
41
#include <linux/mount.h>
C
Chris Mason 已提交
42
#include "compat.h"
C
Chris Mason 已提交
43 44 45 46 47 48
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
49
#include "ordered-data.h"
50
#include "xattr.h"
51
#include "tree-log.h"
52
#include "volumes.h"
53
#include "compression.h"
54
#include "locking.h"
55
#include "free-space-cache.h"
56
#include "inode-map.h"
C
Chris Mason 已提交
57 58 59 60 61 62

/*
 * Lookup key used when finding/creating an in-memory inode: the btrfs
 * inode number plus the subvolume root that owns it.
 */
struct btrfs_iget_args {
	u64 ino;			/* objectid of the inode item */
	struct btrfs_root *root;	/* subvolume tree the inode lives in */
};

63 64 65 66 67
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
68 69
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
70
static const struct file_operations btrfs_dir_file_operations;
71
static struct extent_io_ops btrfs_extent_io_ops;
C
Chris Mason 已提交
72 73 74 75 76

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
77
struct kmem_cache *btrfs_free_space_cachep;
C
Chris Mason 已提交
78 79 80 81 82 83 84 85 86 87 88 89

/*
 * Map the S_IFMT bits of an inode's i_mode (shifted down by S_SHIFT so
 * they fit a small array index) to the btrfs on-disk directory-entry
 * file-type codes (BTRFS_FT_*).
 */
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

90 91
static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
92
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
93 94 95 96
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
97 98
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);
99

100
/*
 * Initialize the security attributes of a freshly created inode:
 * inherit ACLs from the parent directory first, then let the LSM
 * attach its security xattr.  Returns 0 or a negative errno.
 */
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode,  struct inode *dir,
				     const struct qstr *qstr)
{
	int ret;

	/* ACL inheritance must succeed before we bother with xattrs */
	ret = btrfs_init_acl(trans, inode, dir);
	if (ret)
		return ret;

	return btrfs_xattr_security_init(trans, inode, dir, qstr);
}

112 113 114 115 116
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
117
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
118 119
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
120
				int compress_type,
121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

136
	if (compressed_size && compressed_pages)
137 138
		cur_size = compressed_size;

139 140
	path = btrfs_alloc_path();
	if (!path)
141 142
		return -ENOMEM;

143
	path->leave_spinning = 1;
144

145
	key.objectid = btrfs_ino(inode);
146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

168
	if (compress_type != BTRFS_COMPRESS_NONE) {
169 170
		struct page *cpage;
		int i = 0;
171
		while (compressed_size > 0) {
172
			cpage = compressed_pages[i];
173
			cur_size = min_t(unsigned long, compressed_size,
174 175
				       PAGE_CACHE_SIZE);

176
			kaddr = kmap_atomic(cpage, KM_USER0);
177
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
178
			kunmap_atomic(kaddr, KM_USER0);
179 180 181 182 183 184

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
185
						  compress_type);
186 187 188 189 190 191 192 193 194 195 196 197 198
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

199 200 201 202 203 204 205 206 207
	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
208 209
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);
210

211 212 213 214 215 216 217 218 219 220 221 222
	return 0;
fail:
	btrfs_free_path(path);
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
223
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
224 225
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
226
				 size_t compressed_size, int compress_type,
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
242
	    actual_end >= PAGE_CACHE_SIZE ||
243 244 245 246 247 248 249 250
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

251
	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
252
				 &hint_byte, 1);
253 254 255 256 257 258
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
259
				   compress_type, compressed_pages);
260
	BUG_ON(ret);
261
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
262
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
263 264 265
	return 0;
}

266 267 268 269 270 271
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
272
	int compress_type;
273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

/*
 * Record one extent (compressed or passthrough) on the async_cow work
 * list; the ordered submit phase consumes these in queue order.
 */
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *entry;

	entry = kmalloc(sizeof(*entry), GFP_NOFS);
	BUG_ON(!entry);		/* allocation failure here is fatal */

	entry->start = start;
	entry->ram_size = ram_size;
	entry->compressed_size = compressed_size;
	entry->pages = pages;
	entry->nr_pages = nr_pages;
	entry->compress_type = compress_type;
	list_add_tail(&entry->list, &cow->extents);

	return 0;
}

307
/*
308 309 310
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
311
 *
312 313 314 315 316
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
317
 *
318 319 320 321
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
322
 */
323 324 325 326 327
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
328 329 330
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
331 332
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
333
	u64 actual_end;
334
	u64 isize = i_size_read(inode);
335
	int ret = 0;
336 337 338 339 340 341
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
342
	unsigned long max_uncompressed = 128 * 1024;
343 344
	int i;
	int will_compress;
345
	int compress_type = root->fs_info->compress_type;
346

347 348 349 350
	/* if this is a small write inside eof, kick off a defragbot */
	if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
		btrfs_add_inode_defrag(NULL, inode);

351
	actual_end = min_t(u64, isize, end + 1);
352 353 354 355
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
356

357 358 359 360 361 362 363 364 365 366 367 368 369
	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

370 371 372 373
	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
374 375 376 377 378 379 380
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
381 382
	 */
	total_compressed = min(total_compressed, max_uncompressed);
383
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
384
	num_bytes = max(blocksize,  num_bytes);
385 386
	total_in = 0;
	ret = 0;
387

388 389 390 391
	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
392
	 */
393
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
394
	    (btrfs_test_opt(root, COMPRESS) ||
395 396
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
397
		WARN_ON(pages);
398
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
399 400 401 402
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}
403

404 405 406 407 408 409 410 411 412 413
		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);
414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
433
cont:
434
	if (start == 0) {
435
		trans = btrfs_join_transaction(root);
436
		BUG_ON(IS_ERR(trans));
437
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
438

439
		/* lets try to make an inline extent */
440
		if (ret || total_in < (actual_end - start)) {
441
			/* we didn't compress the entire range, try
442
			 * to make an uncompressed inline extent.
443 444
			 */
			ret = cow_file_range_inline(trans, root, inode,
445
						    start, end, 0, 0, NULL);
446
		} else {
447
			/* try making a compressed inline extent */
448 449
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
450 451
						    total_compressed,
						    compress_type, pages);
452 453
		}
		if (ret == 0) {
454 455 456 457 458
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
459
			extent_clear_unlock_delalloc(inode,
460 461 462
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
463
			     EXTENT_CLEAR_DELALLOC |
464
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
465 466

			btrfs_end_transaction(trans, root);
467 468
			goto free_pages_out;
		}
469
		btrfs_end_transaction(trans, root);
470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
499
			WARN_ON(pages[i]->mapping);
500 501 502 503 504 505 506 507
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
508 509
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
510
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
511
		}
512
	}
513 514
	if (will_compress) {
		*num_added += 1;
515

516 517 518 519 520
		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
521 522
				 total_compressed, pages, nr_pages_ret,
				 compress_type);
523

524
		if (start + num_bytes < end) {
525 526 527 528 529 530
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
531
cleanup_and_bail_uncompressed:
532 533 534 535 536 537 538 539 540 541 542 543
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
544 545
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
546 547
		*num_added += 1;
	}
548

549 550 551 552 553 554 555 556
out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
557
	kfree(pages);
558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
579
	int ret = 0;
580 581 582 583 584

	if (list_empty(&async_cow->extents))
		return 0;


585
	while (!list_empty(&async_cow->extents)) {
586 587 588
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
589

590 591
		io_tree = &BTRFS_I(inode)->io_tree;

592
retry:
593 594 595 596 597 598
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
599 600
					 async_extent->start +
					 async_extent->ram_size - 1, GFP_NOFS);
601 602

			/* allocate blocks */
603 604 605 606 607
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);
608 609 610 611 612 613 614

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
615
			if (!page_started && !ret)
616 617
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
618
						  async_extent->start +
619 620 621 622 623 624 625 626 627 628 629 630
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

631
		trans = btrfs_join_transaction(root);
632
		BUG_ON(IS_ERR(trans));
633
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
634 635 636 637 638
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
639 640
		btrfs_end_transaction(trans, root);

641 642 643 644 645 646 647 648 649 650 651 652 653 654 655
		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

656 657 658 659 660 661 662 663
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

664
		em = alloc_extent_map();
665
		BUG_ON(!em);
666 667
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
668
		em->orig_start = em->start;
669

670 671 672
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
673
		em->compress_type = async_extent->compress_type;
674 675 676
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

677
		while (1) {
678
			write_lock(&em_tree->lock);
679
			ret = add_extent_mapping(em_tree, em);
680
			write_unlock(&em_tree->lock);
681 682 683 684 685 686 687 688 689
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

690 691 692 693 694 695 696
		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
697 698 699 700 701 702
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
703 704 705 706 707 708
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
709
				EXTENT_CLEAR_DELALLOC |
710
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
711 712

		ret = btrfs_submit_compressed_write(inode,
713 714 715 716 717
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
718 719 720 721 722 723 724 725 726 727

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}

728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759
/*
 * Pick an on-disk byte to use as an allocation hint for a new extent
 * covering [start, start + num_bytes): prefer the block_start of an
 * existing mapping for that range, falling back to the inode's first
 * mapped block.  Returns 0 when no usable hint exists.
 */
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(root, inode));
	trans = btrfs_join_transaction(root);
	BUG_ON(IS_ERR(trans));
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	/* round the range up to a whole number of blocks */
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/* allocate the range piece by piece, as big as the allocator allows */
	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map();
		BUG_ON(!em);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		/* insert the mapping, dropping any stale overlap on -EEXIST */
		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}
908

909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946
/*
 * work queue callback to start compression on a file range; runs
 * compress_file_range() for the async_cow's range.  If no async
 * extents were queued, clear ->inode so the ordered submit step
 * (async_cow_submit) knows there is nothing to write.
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 *
 * Decrements the global async_delalloc_pages count for this range and
 * wakes throttled writers (see the limit in cow_file_range_async())
 * once the backlog drops below 5MB.
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	/* 5MB wakeup threshold; was "5 * 1042 * 1024", a typo for 1024 */
	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	/* ->inode was cleared by async_cow_start if nothing was queued */
	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
950

951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967
/* work queue teardown callback: free the async_cow context itself */
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

/*
 * Split a delalloc range into chunks and queue each chunk to the
 * delalloc worker threads for (optional) compression and submission.
 *
 * The extent range is unlocked here; the async workers re-lock the
 * pieces they process.  *page_started is set so the caller knows
 * writeback has been handed off, and *nr_written is bumped by the
 * number of pages queued.  Always returns 0.
 */
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	/*
	 * throttle once more than 10MB of pages are in flight
	 * (was "10 * 1024 * 1042" -- an obvious typo for 1024)
	 */
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		/* compressed extents are chunked at 512K; NOCOMPRESS inodes
		 * take the whole range in one record */
		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		/* throttle submitters while the async backlog is too deep */
		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		/* if someone is draining the async queue, wait for it to
		 * empty completely before queueing more */
		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

1018
static noinline int csum_exist_in_range(struct btrfs_root *root,
1019 1020 1021 1022 1023 1024
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

1025
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
A
Arne Jansen 已提交
1026
				       bytenr + num_bytes - 1, &list, 0);
1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * when nocow writeback calls back.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 *
 * @inode:        inode being written
 * @locked_page:  page already locked by the caller
 * @start/@end:   byte range (inclusive) of the delalloc region
 * @page_started: set by cow_file_range() if writeback was started
 * @force:        nonzero to attempt nocow even for REG (non-prealloc) extents
 * @nr_written:   incremented with the number of pages written
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;		/* start of a pending range that must be COWed */
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* the free space cache inode must not take the normal transaction path */
	nolock = btrfs_is_free_space_inode(root, inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	BUG_ON(IS_ERR(trans));
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	/* walk the file extent items covering [start, end] */
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			/* the previous item may be the extent containing
			 * cur_offset; back up one slot to examine it */
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* past this inode's data, or past the requested range */
		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/* a hole before the next extent must be COWed */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			/* holes (bytenr 0) always need COW */
			if (disk_bytenr == 0)
				goto out_check;
			/* encoded extents can't be rewritten in place */
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			/* plain REG extents only qualify when forced */
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			/* shared with a snapshot/reflink => must COW */
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			/* accumulate a contiguous range that needs COW */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		/* flush any pending COW range before the nocow extent */
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			/* pin an extent map so writeback finds the mapping */
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				/* drop the conflicting cached mapping and retry */
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			BUG_ON(ret);
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	/* COW whatever is left of the range */
	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	if (nolock) {
		ret = btrfs_end_transaction_nolock(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 *
 * Dispatches the delalloc range to the right strategy; the order of
 * these checks matters: NODATACOW wins over PREALLOC, and compression
 * (the async path) is only used when any of the compress knobs is set.
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		/* force=1: try nocow even for regular extents */
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		/* force=0: only preallocated extents are written in place */
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress) &&
		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
		/* no compression requested anywhere: plain synchronous COW */
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	else
		/* compression enabled: hand off to the async workers */
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

1293 1294
static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
1295
{
1296
	/* not delalloc, ignore it */
1297
	if (!(orig->state & EXTENT_DELALLOC))
1298
		return;
1299

1300 1301 1302
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
1303 1304 1305 1306 1307 1308 1309 1310
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
1311 1312 1313
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
1314 1315 1316
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
1317
		return;
1318

1319 1320 1321
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
1322 1323
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* the free space cache inode stays off the delalloc list */
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			/* first set on this range: the caller already
			 * accounted the outstanding extent */
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* the free space cache inode is never on the delalloc list */
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			/* when DO_ACCOUNTING is set the caller handles the
			 * outstanding extent itself */
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		/* relocation manages its own data space reservation */
		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 *
 * Returns 1 if adding @size bytes at @offset would make the bio cross a
 * stripe/chunk boundary (caller must not merge), 0 if the merge is safe,
 * or the error from btrfs_map_block().
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	/* compressed bios are checksummed/split elsewhere; always mergeable */
	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	/* map_length comes back as the max contiguous run at 'logical' */
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return ret;
}

1434 1435 1436 1437 1438 1439 1440 1441
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the cums attached on the ordered extent record
 * are inserted into the btree
 */
1442 1443
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
1444 1445
				    unsigned long bio_flags,
				    u64 bio_offset)
1446 1447 1448
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
1449

1450
	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1451
	BUG_ON(ret);
1452 1453
	return 0;
}
1454

1455 1456 1457 1458 1459 1460 1461 1462
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the cums attached on the ordered extent record
 * are inserted into the btree
 */
1463
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1464 1465
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
1466 1467
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
1468
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1469 1470
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	/* pick the end_io workqueue: 2 for the free-space inode, 0 otherwise */
	if (btrfs_is_free_space_inode(root, inode))
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
	else
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & REQ_WRITE)) {
		/* read path: attach csums (or decompress) before mapping */
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
1516

1517 1518 1519 1520
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
1521
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1522 1523 1524 1525 1526
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

1527
	list_for_each_entry(sum, list, list) {
1528 1529
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1530 1531 1532 1533
	}
	return 0;
}

1534 1535
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
1536
{
1537
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1538
		WARN_ON(1);
1539
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1540
				   cached_state, GFP_NOFS);
1541 1542
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	/* page that was dirtied without going through the normal setup */
	struct page *page;
	/* queued on fs_info->fixup_workers */
	struct btrfs_work work;
};

/*
 * Worker for the fixup queue: re-validate a page that was dirtied
 * behind our back and put it back on the proper delalloc path.
 * Retries (via 'again') while an ordered extent covers the page.
 */
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	/* page was truncated, cleaned, or already handled: nothing to fix */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state, GFP_NOFS);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		/* wait for in-flight IO on this page, then re-check */
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	/*
	 * NOTE(review): with the current reservation scheme this path is
	 * treated as unreachable, hence the unconditional BUG() -- the two
	 * lines after it are dead code kept for when the path is restored.
	 */
	BUG();
	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 *
 * Returns 0 when the page is already properly set up, -EAGAIN when a
 * fixup has been queued (or is pending) and writeback should back off.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	/* a fixup worker is already queued for this page */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}

/*
 * Insert a file extent item for [file_pos, file_pos + num_bytes) pointing
 * at the already-reserved disk extent [disk_bytenr, +disk_num_bytes),
 * dropping any overlapping extents first, then record the extent
 * allocation in the extent tree.
 */
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	BUG_ON(ret);

	ins.objectid = btrfs_ino(inode);
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	/* add a backref for the new data extent */
	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					btrfs_ino(inode), file_pos, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);

	return 0;
}

/*
 * NOTE(review): the comment below ("reads in some of the csum leaves")
 * appears to describe a prefetch helper that is no longer present here.
 */
/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	int compress_type = 0;
	int ret;
	bool nolock;

	/* only proceed once every byte of the ordered extent is written */
	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					     end - start + 1);
	if (!ret)
		return 0;
	BUG_ON(!ordered_extent);

	nolock = btrfs_is_free_space_inode(root, inode);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		/* nocow: no new extents to insert, just push i_size forward */
		BUG_ON(!list_empty(&ordered_extent->list));
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			if (nolock)
				trans = btrfs_join_transaction_nolock(root);
			else
				trans = btrfs_join_transaction(root);
			BUG_ON(IS_ERR(trans));
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode_fallback(trans, root, inode);
			BUG_ON(ret);
		}
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state, GFP_NOFS);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	BUG_ON(IS_ERR(trans));
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		/* prealloc: flip the existing extent from prealloc to written */
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		/* regular COW write: insert the new file extent item */
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);

	/* insert the csums attached to this ordered extent into the btree */
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		ret = btrfs_update_inode_fallback(trans, root, inode);
		BUG_ON(ret);
	}
	ret = 0;
out:
	if (root != root->fs_info->tree_root)
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
	if (trans) {
		if (nolock)
			btrfs_end_transaction_nolock(trans, root);
		else
			btrfs_end_transaction(trans, root);
	}

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
}

1819
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1820 1821
				struct extent_state *state, int uptodate)
{
1822 1823
	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

1824
	ClearPagePrivate2(page);
1825 1826 1827
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, the code in
 * extent_io.c will try to find good copies for us.
 *
 * Returns 0 on a good csum (or when csums don't apply), -EIO on mismatch
 * after poisoning the page contents.
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	/* a fixup/repair path already validated this page */
	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto good;

	/* relocation marks ranges without csums with EXTENT_NODATASUM */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	/* the expected csum was stashed in the extent state private field */
	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	return 0;

zeroit:
	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
		       "private %llu\n",
		       (unsigned long long)btrfs_ino(page->mapping->host),
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	/* poison the range so stale data isn't mistaken for good data */
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	/* no stored csum at all: treat as success so reads can proceed */
	if (private == 0)
		return 0;
	return -EIO;
}
1892

Y
Yan, Zheng 已提交
1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940
/*
 * A deferred final iput: one queued inode, linked onto
 * fs_info->delayed_iputs and drained by btrfs_run_delayed_iputs().
 */
struct delayed_iput {
	struct list_head list;	/* entry in fs_info->delayed_iputs */
	struct inode *inode;	/* inode whose last reference we hold */
};

/*
 * Drop a reference to @inode, but if it would be the final reference,
 * defer the iput() instead of doing it in this context: queue the inode
 * on fs_info->delayed_iputs for btrfs_run_delayed_iputs() to release
 * later.  Useful when the caller cannot safely run inode eviction
 * (e.g. inside a transaction commit).
 */
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *di;

	/* not the last reference: drop it right here and we are done */
	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	/* __GFP_NOFAIL: the allocation cannot fail, no error path needed */
	di = kmalloc(sizeof(*di), GFP_NOFS | __GFP_NOFAIL);
	di->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&di->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}

void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}

1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974
/* Progress of per-root orphan cleanup, kept in root->orphan_cleanup_state. */
enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};

/*
 * This is called at transaction commit time. If there are no orphan
 * files in the subvolume, it removes the orphan item and frees the
 * block_rsv structure.
 */
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	int err;

	/* nothing to tear down while orphans remain or cleanup isn't done */
	if (!list_empty(&root->orphan_list))
		return;
	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	if (root->orphan_item_inserted &&
	    btrfs_root_refs(&root->root_item) > 0) {
		err = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
					    root->root_key.objectid);
		BUG_ON(err);
		root->orphan_item_inserted = 0;
	}

	if (root->orphan_block_rsv) {
		WARN_ON(root->orphan_block_rsv->size > 0);
		btrfs_free_block_rsv(root, root->orphan_block_rsv);
		root->orphan_block_rsv = NULL;
	}
}

1975 1976 1977
/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
1978 1979 1980
 *
 * NOTE: caller of this function should reserve 5 units of metadata for
 *	 this function.
1981 1982 1983 1984
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
1985 1986 1987 1988
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;
1989

1990 1991
	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(root);
1992 1993
		if (!block_rsv)
			return -ENOMEM;
1994
	}
1995

1996 1997 1998 1999 2000 2001
	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		btrfs_free_block_rsv(root, block_rsv);
		block_rsv = NULL;
2002 2003
	}

2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017
	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
#endif
		insert = 1;
2018 2019
	}

2020 2021 2022 2023 2024
	if (!BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 1;
		reserve = 1;
	}
	spin_unlock(&root->orphan_lock);
2025

2026 2027 2028 2029 2030
	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		BUG_ON(ret);
	}
2031

2032 2033
	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
2034
		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2035 2036 2037 2038 2039 2040 2041 2042 2043 2044
		BUG_ON(ret);
	}

	/* insert an orphan item to track subvolume contains orphan files */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
					       root->root_key.objectid);
		BUG_ON(ret);
	}
	return 0;
2045 2046 2047 2048 2049 2050 2051 2052 2053
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
2054 2055
	int delete_item = 0;
	int release_rsv = 0;
2056 2057
	int ret = 0;

2058 2059 2060 2061
	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_del_init(&BTRFS_I(inode)->i_orphan);
		delete_item = 1;
2062 2063
	}

2064 2065 2066
	if (BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 0;
		release_rsv = 1;
2067
	}
2068
	spin_unlock(&root->orphan_lock);
2069

2070
	if (trans && delete_item) {
2071
		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2072 2073
		BUG_ON(ret);
	}
2074

2075 2076
	if (release_rsv)
		btrfs_orphan_release_metadata(inode);
2077

2078
	return 0;
2079 2080 2081 2082 2083 2084
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
2085
int btrfs_orphan_cleanup(struct btrfs_root *root)
2086 2087 2088 2089 2090 2091
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
2092
	u64 last_objectid = 0;
2093 2094
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

2095
	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2096
		return 0;
2097 2098

	path = btrfs_alloc_path();
2099 2100 2101 2102
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
2103 2104 2105 2106 2107 2108 2109 2110
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2111 2112
		if (ret < 0)
			goto out;
2113 2114 2115

		/*
		 * if ret == 0 means we found what we were searching for, which
L
Lucas De Marchi 已提交
2116
		 * is weird, but possible, so only screw with path if we didn't
2117 2118 2119
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
2120
			ret = 0;
2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
2137
		btrfs_release_path(path);
2138 2139 2140 2141 2142 2143

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
2144 2145 2146 2147 2148 2149 2150 2151 2152 2153

		if (found_key.offset == last_objectid) {
			printk(KERN_ERR "btrfs: Error removing orphan entry, "
			       "stopping orphan cleanup\n");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

2154 2155 2156
		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
2157
		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2158 2159
		ret = PTR_RET(inode);
		if (ret && ret != -ESTALE)
2160
			goto out;
2161 2162

		/*
2163 2164
		 * Inode is already gone but the orphan item is still there,
		 * kill the orphan item.
2165
		 */
2166 2167
		if (ret == -ESTALE) {
			trans = btrfs_start_transaction(root, 1);
2168 2169 2170 2171
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
2172 2173 2174
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			BUG_ON(ret);
2175
			btrfs_end_transaction(trans, root);
2176 2177 2178
			continue;
		}

2179 2180 2181 2182 2183 2184 2185 2186
		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->orphan_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->orphan_lock);

2187 2188
		/* if we have links, this was a truncate, lets do that */
		if (inode->i_nlink) {
2189 2190 2191 2192 2193
			if (!S_ISREG(inode->i_mode)) {
				WARN_ON(1);
				iput(inode);
				continue;
			}
2194
			nr_truncate++;
2195 2196 2197 2198 2199 2200
			/*
			 * Need to hold the imutex for reservation purposes, not
			 * a huge deal here but I have a WARN_ON in
			 * btrfs_delalloc_reserve_space to catch offenders.
			 */
			mutex_lock(&inode->i_mutex);
2201
			ret = btrfs_truncate(inode);
2202
			mutex_unlock(&inode->i_mutex);
2203 2204 2205 2206 2207 2208
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
2209 2210
		if (ret)
			goto out;
2211
	}
2212 2213 2214
	/* release the path since we're done with it */
	btrfs_release_path(path);

2215 2216 2217 2218 2219 2220 2221
	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
		btrfs_block_rsv_release(root, root->orphan_block_rsv,
					(u64)-1);

	if (root->orphan_block_rsv || root->orphan_item_inserted) {
2222
		trans = btrfs_join_transaction(root);
2223 2224
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans, root);
2225
	}
2226 2227 2228 2229 2230

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2231 2232 2233 2234 2235 2236

out:
	if (ret)
		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
	btrfs_free_path(path);
	return ret;
2237 2238
}

2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289
/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 *
 * Returns 0 when the inode provably has no acls, 1 when it might.
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key key;
	int checked = 0;

	for (slot = slot + 1; slot < nritems; slot++) {
		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* a different objectid means there are no acls here */
		if (key.objectid != objectid)
			return 0;

		/* an xattr item: assume there's an acl */
		if (key.type == BTRFS_XATTR_ITEM_KEY)
			return 1;

		/*
		 * anything sorted after the xattr keys means no acls
		 * can follow
		 */
		if (key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		/*
		 * it goes inode, inode backrefs, xattrs, extents, so an
		 * inode with many hard links has many backrefs.  Don't
		 * waste time searching too hard, this is just an
		 * optimization
		 */
		if (++checked >= 8)
			break;
	}
	/*
	 * we hit the end of the leaf (or gave up) before finding an xattr
	 * or something larger than an xattr; assume the inode has acls
	 */
	return 1;
}

2290 2291 2292
/*
 * read an inode from the btree into the in-memory inode
 */
2293
static void btrfs_read_locked_inode(struct inode *inode)
C
Chris Mason 已提交
2294 2295
{
	struct btrfs_path *path;
2296
	struct extent_buffer *leaf;
C
Chris Mason 已提交
2297
	struct btrfs_inode_item *inode_item;
2298
	struct btrfs_timespec *tspec;
C
Chris Mason 已提交
2299 2300
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
2301
	int maybe_acls;
J
Josef Bacik 已提交
2302
	u32 rdev;
C
Chris Mason 已提交
2303
	int ret;
2304 2305 2306 2307 2308
	bool filled = false;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;
C
Chris Mason 已提交
2309 2310

	path = btrfs_alloc_path();
2311 2312 2313
	if (!path)
		goto make_bad;

2314
	path->leave_spinning = 1;
C
Chris Mason 已提交
2315
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2316

C
Chris Mason 已提交
2317
	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2318
	if (ret)
C
Chris Mason 已提交
2319 2320
		goto make_bad;

2321
	leaf = path->nodes[0];
2322 2323 2324 2325

	if (filled)
		goto cache_acl;

2326 2327 2328 2329 2330 2331
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2332
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

2346
	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2347
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2348
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2349
	inode->i_generation = BTRFS_I(inode)->generation;
J
Josef Bacik 已提交
2350
	inode->i_rdev = 0;
2351 2352
	rdev = btrfs_inode_rdev(leaf, inode_item);

2353
	BTRFS_I(inode)->index_cnt = (u64)-1;
2354
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2355
cache_acl:
2356 2357 2358 2359
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
2360 2361
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(inode));
2362 2363
	if (!maybe_acls)
		cache_no_acl(inode);
2364

C
Chris Mason 已提交
2365 2366 2367 2368 2369
	btrfs_free_path(path);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
2370
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2371
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
C
Chris Mason 已提交
2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2385
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
C
Chris Mason 已提交
2386
		break;
J
Josef Bacik 已提交
2387
	default:
J
Jim Owens 已提交
2388
		inode->i_op = &btrfs_special_inode_operations;
J
Josef Bacik 已提交
2389 2390
		init_special_inode(inode, inode->i_mode, rdev);
		break;
C
Chris Mason 已提交
2391
	}
2392 2393

	btrfs_update_iflags(inode);
C
Chris Mason 已提交
2394 2395 2396 2397 2398 2399 2400
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}

2401 2402 2403
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
2404 2405
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
2406
			    struct btrfs_inode_item *item,
C
Chris Mason 已提交
2407 2408
			    struct inode *inode)
{
2409 2410
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2411
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

2430
	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2431
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2432
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2433
	btrfs_set_inode_transid(leaf, item, trans->transid);
2434
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
Y
Yan 已提交
2435
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2436
	btrfs_set_inode_block_group(leaf, item, 0);
C
Chris Mason 已提交
2437 2438
}

2439 2440 2441
/*
 * copy everything in the in-memory inode into the btree.
 */
2442
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2443
				struct btrfs_root *root, struct inode *inode)
C
Chris Mason 已提交
2444 2445 2446
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
2447
	struct extent_buffer *leaf;
C
Chris Mason 已提交
2448 2449 2450
	int ret;

	path = btrfs_alloc_path();
2451 2452 2453
	if (!path)
		return -ENOMEM;

2454
	path->leave_spinning = 1;
2455 2456
	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
				 1);
C
Chris Mason 已提交
2457 2458 2459 2460 2461 2462
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

2463
	btrfs_unlock_up_safe(path, 1);
2464 2465
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2466
				    struct btrfs_inode_item);
C
Chris Mason 已提交
2467

2468
	fill_inode_item(trans, leaf, inode_item, inode);
2469
	btrfs_mark_buffer_dirty(leaf);
2470
	btrfs_set_inode_last_trans(trans, inode);
C
Chris Mason 已提交
2471 2472 2473 2474 2475 2476
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay, so both of those bypass the delayed path.
	 */
	if (btrfs_is_free_space_inode(root, inode) ||
	    root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return btrfs_update_inode_item(trans, root, inode);

	ret = btrfs_delayed_update_inode(trans, root, inode);
	if (!ret)
		btrfs_set_inode_last_trans(trans, inode);
	return ret;
}

/*
 * Like btrfs_update_inode(), but on ENOSPC from the delayed path fall
 * back to updating the inode item directly in the tree.
 */
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int err = btrfs_update_inode(trans, root, inode);

	if (err != -ENOSPC)
		return err;
	return btrfs_update_inode_item(trans, root, inode);
}

2514 2515 2516 2517 2518
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It remove a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
2519 2520 2521 2522
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
C
Chris Mason 已提交
2523 2524 2525
{
	struct btrfs_path *path;
	int ret = 0;
2526
	struct extent_buffer *leaf;
C
Chris Mason 已提交
2527
	struct btrfs_dir_item *di;
2528
	struct btrfs_key key;
2529
	u64 index;
2530 2531
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);
C
Chris Mason 已提交
2532 2533

	path = btrfs_alloc_path();
2534 2535
	if (!path) {
		ret = -ENOMEM;
2536
		goto out;
2537 2538
	}

2539
	path->leave_spinning = 1;
2540
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
C
Chris Mason 已提交
2541 2542 2543 2544 2545 2546 2547 2548 2549
				    name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
2550 2551
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
C
Chris Mason 已提交
2552
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2553 2554
	if (ret)
		goto err;
2555
	btrfs_release_path(path);
C
Chris Mason 已提交
2556

2557 2558
	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
2559
	if (ret) {
2560
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2561 2562
		       "inode %llu parent %llu\n", name_len, name,
		       (unsigned long long)ino, (unsigned long long)dir_ino);
2563 2564 2565
		goto err;
	}

2566 2567
	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret)
C
Chris Mason 已提交
2568 2569
		goto err;

2570
	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2571
					 inode, dir_ino);
2572
	BUG_ON(ret != 0 && ret != -ENOENT);
2573 2574 2575

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
2576 2577
	if (ret == -ENOENT)
		ret = 0;
C
Chris Mason 已提交
2578 2579
err:
	btrfs_free_path(path);
2580 2581 2582 2583 2584 2585 2586
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
out:
C
Chris Mason 已提交
2587 2588 2589
	return ret;
}

2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604
/*
 * Remove a directory entry for @inode and, on success, drop its link
 * count and write the inode back.
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int err = __btrfs_unlink_inode(trans, root, dir, inode,
				       name, name_len);

	if (err)
		return err;
	btrfs_drop_nlink(inode);
	return btrfs_update_inode(trans, root, inode);
}

2605 2606 2607
/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
			     struct btrfs_path *path)
C
Chris Mason 已提交
2608
{
2609 2610
	struct extent_buffer *eb;
	int level;
2611
	u64 refs = 1;
2612

2613
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2614 2615
		int ret;

2616 2617 2618 2619 2620 2621 2622 2623 2624
		if (!path->nodes[level])
			break;
		eb = path->nodes[level];
		if (!btrfs_block_can_be_shared(root, eb))
			continue;
		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
					       &refs, NULL);
		if (refs > 1)
			return 1;
2625
	}
2626
	return 0;
C
Chris Mason 已提交
2627 2628
}

2629 2630 2631 2632 2633 2634 2635 2636 2637
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space.
 * so in enospc case, we should make sure they will free space before
 * allowing them to use the global metadata reservation.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
						       struct dentry *dentry)
2638
{
C
Chris Mason 已提交
2639
	struct btrfs_trans_handle *trans;
2640
	struct btrfs_root *root = BTRFS_I(dir)->root;
2641
	struct btrfs_path *path;
2642
	struct btrfs_inode_ref *ref;
2643
	struct btrfs_dir_item *di;
2644
	struct inode *inode = dentry->d_inode;
2645
	u64 index;
2646 2647
	int check_link = 1;
	int err = -ENOSPC;
2648
	int ret;
2649 2650
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);
2651

2652 2653 2654 2655 2656 2657 2658 2659 2660 2661
	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode ref in the tree log
	 * 2 for the dir entries in the log
	 * 1 for the inode
	 */
	trans = btrfs_start_transaction(root, 8);
2662 2663
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;
2664

2665
	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2666
		return ERR_PTR(-ENOSPC);
2667

2668 2669 2670
	/* check if there is someone else holds reference */
	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
		return ERR_PTR(-ENOSPC);
2671

2672 2673
	if (atomic_read(&inode->i_count) > 2)
		return ERR_PTR(-ENOSPC);
2674

2675 2676 2677 2678 2679 2680 2681
	if (xchg(&root->fs_info->enospc_unlink, 1))
		return ERR_PTR(-ENOSPC);

	path = btrfs_alloc_path();
	if (!path) {
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(-ENOMEM);
2682 2683
	}

2684 2685
	/* 1 for the orphan item */
	trans = btrfs_start_transaction(root, 1);
2686
	if (IS_ERR(trans)) {
2687 2688 2689 2690
		btrfs_free_path(path);
		root->fs_info->enospc_unlink = 0;
		return trans;
	}
2691

2692 2693
	path->skip_locking = 1;
	path->search_commit_root = 1;
2694

2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705
	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(dir)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
2706
	}
2707
	btrfs_release_path(path);
2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(inode)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
2721
	btrfs_release_path(path);
2722 2723 2724

	if (ret == 0 && S_ISREG(inode->i_mode)) {
		ret = btrfs_lookup_file_extent(trans, root, path,
2725
					       ino, (u64)-1, 0);
2726 2727 2728 2729 2730 2731 2732
		if (ret < 0) {
			err = ret;
			goto out;
		}
		BUG_ON(ret == 0);
		if (check_path_shared(root, path))
			goto out;
2733
		btrfs_release_path(path);
2734 2735 2736 2737 2738 2739 2740
	}

	if (!check_link) {
		err = 0;
		goto out;
	}

2741
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	if (di) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		err = 0;
		goto out;
	}
2754
	btrfs_release_path(path);
2755 2756 2757

	ref = btrfs_lookup_inode_ref(trans, root, path,
				dentry->d_name.name, dentry->d_name.len,
2758
				ino, dir_ino, 0);
2759 2760 2761 2762 2763 2764 2765 2766
	if (IS_ERR(ref)) {
		err = PTR_ERR(ref);
		goto out;
	}
	BUG_ON(!ref);
	if (check_path_shared(root, path))
		goto out;
	index = btrfs_inode_ref_index(path->nodes[0], ref);
2767
	btrfs_release_path(path);
2768

2769 2770 2771 2772 2773 2774 2775 2776
	/*
	 * This is a commit root search, if we can lookup inode item and other
	 * relative items in the commit root, it means the transaction of
	 * dir/file creation has been committed, and the dir index item that we
	 * delay to insert has also been inserted into the commit root. So
	 * we needn't worry about the delayed insertion of the dir index item
	 * here.
	 */
2777
	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	BUG_ON(ret == -ENOENT);
	if (check_path_shared(root, path))
		goto out;

	err = 0;
out:
	btrfs_free_path(path);
2790 2791 2792 2793
	/* Migrate the orphan reservation over */
	if (!err)
		err = btrfs_block_rsv_migrate(trans->block_rsv,
				&root->fs_info->global_block_rsv,
2794
				trans->bytes_reserved);
2795

2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809
	if (err) {
		btrfs_end_transaction(trans, root);
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(err);
	}

	trans->block_rsv = &root->fs_info->global_block_rsv;
	return trans;
}

/*
 * Undo __unlink_start_trans(): give back the global reservation if we
 * borrowed it, clear the enospc_unlink flag, and end the transaction.
 */
static void __unlink_end_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
		btrfs_block_rsv_release(root, trans->block_rsv,
					trans->bytes_reserved);
		trans->block_rsv = &root->fs_info->trans_block_rsv;
		BUG_ON(!root->fs_info->enospc_unlink);
		root->fs_info->enospc_unlink = 0;
	}
	btrfs_end_transaction_throttle(trans, root);
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
2830

2831 2832
	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

2833 2834
	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
2835 2836
	if (ret)
		goto out;
2837

2838
	if (inode->i_nlink == 0) {
2839
		ret = btrfs_orphan_add(trans, inode);
2840 2841
		if (ret)
			goto out;
2842
	}
2843

2844
out:
2845
	nr = trans->blocks_used;
2846
	__unlink_end_trans(trans, root);
2847
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
2848 2849 2850
	return ret;
}

2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861
/*
 * Remove the directory entry pointing at a subvolume root and drop the
 * root backref that ties the subvolume to @dir.
 */
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	BUG_ON(IS_ERR_OR_NULL(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(path);

	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir_ino, &index, name, name_len);
	if (ret < 0) {
		/* no root ref (old filesystem); find the index by hand */
		BUG_ON(ret != -ENOENT);
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		BUG_ON(IS_ERR_OR_NULL(di));

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
		index = key.offset;
	}
	btrfs_release_path(path);

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	BUG_ON(ret);

	/* each dir entry accounts for name_len twice (dir item + index) */
	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

C
Chris Mason 已提交
2907 2908 2909
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
2910
	int err = 0;
C
Chris Mason 已提交
2911 2912
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
2913
	unsigned long nr = 0;
C
Chris Mason 已提交
2914

2915
	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2916
	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
Y
Yan 已提交
2917 2918
		return -ENOTEMPTY;

2919 2920
	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
2921 2922
		return PTR_ERR(trans);

2923
	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2924 2925 2926 2927 2928 2929 2930
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

2931 2932
	err = btrfs_orphan_add(trans, inode);
	if (err)
2933
		goto out;
2934

C
Chris Mason 已提交
2935
	/* now the directory is empty */
2936 2937
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
2938
	if (!err)
2939
		btrfs_i_size_write(inode, 0);
2940
out:
2941
	nr = trans->blocks_used;
2942
	__unlink_end_trans(trans, root);
2943
	btrfs_btree_balance_dirty(root, nr);
2944

C
Chris Mason 已提交
2945 2946 2947 2948 2949 2950
	return err;
}

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
2951
 * any higher than new_size
C
Chris Mason 已提交
2952 2953 2954
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
2955 2956 2957
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
C
Chris Mason 已提交
2958
 */
2959 2960 2961 2962
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
C
Chris Mason 已提交
2963 2964
{
	struct btrfs_path *path;
2965
	struct extent_buffer *leaf;
C
Chris Mason 已提交
2966
	struct btrfs_file_extent_item *fi;
2967 2968
	struct btrfs_key key;
	struct btrfs_key found_key;
C
Chris Mason 已提交
2969
	u64 extent_start = 0;
2970
	u64 extent_num_bytes = 0;
2971
	u64 extent_offset = 0;
C
Chris Mason 已提交
2972
	u64 item_end = 0;
2973 2974
	u64 mask = root->sectorsize - 1;
	u32 found_type = (u8)-1;
C
Chris Mason 已提交
2975 2976
	int found_extent;
	int del_item;
2977 2978
	int pending_del_nr = 0;
	int pending_del_slot = 0;
2979
	int extent_type = -1;
2980
	int encoding;
2981 2982
	int ret;
	int err = 0;
2983
	u64 ino = btrfs_ino(inode);
2984 2985

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
C
Chris Mason 已提交
2986

2987 2988 2989 2990 2991
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = -1;

2992
	if (root->ref_cows || root == root->fs_info->tree_root)
2993
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2994

2995 2996 2997 2998 2999 3000 3001 3002 3003
	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the loged items. So we shouldn't kill the delayed
	 * items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(inode);

3004
	key.objectid = ino;
C
Chris Mason 已提交
3005
	key.offset = (u64)-1;
3006 3007
	key.type = (u8)-1;

3008
search_again:
3009
	path->leave_spinning = 1;
3010
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3011 3012 3013 3014
	if (ret < 0) {
		err = ret;
		goto out;
	}
3015

3016
	if (ret > 0) {
3017 3018 3019
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
3020 3021
		if (path->slots[0] == 0)
			goto out;
3022 3023 3024
		path->slots[0]--;
	}

3025
	while (1) {
C
Chris Mason 已提交
3026
		fi = NULL;
3027 3028 3029
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);
3030
		encoding = 0;
C
Chris Mason 已提交
3031

3032
		if (found_key.objectid != ino)
C
Chris Mason 已提交
3033
			break;
3034

3035
		if (found_type < min_type)
C
Chris Mason 已提交
3036 3037
			break;

3038
		item_end = found_key.offset;
C
Chris Mason 已提交
3039
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3040
			fi = btrfs_item_ptr(leaf, path->slots[0],
C
Chris Mason 已提交
3041
					    struct btrfs_file_extent_item);
3042
			extent_type = btrfs_file_extent_type(leaf, fi);
3043 3044 3045 3046
			encoding = btrfs_file_extent_compression(leaf, fi);
			encoding |= btrfs_file_extent_encryption(leaf, fi);
			encoding |= btrfs_file_extent_other_encoding(leaf, fi);

3047
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3048
				item_end +=
3049
				    btrfs_file_extent_num_bytes(leaf, fi);
3050 3051
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
3052
									 fi);
C
Chris Mason 已提交
3053
			}
3054
			item_end--;
C
Chris Mason 已提交
3055
		}
3056 3057 3058 3059
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
3060
				break;
3061 3062 3063 3064
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
C
Chris Mason 已提交
3065 3066 3067
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
3068 3069 3070 3071
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
C
Chris Mason 已提交
3072
			u64 num_dec;
3073
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3074
			if (!del_item && !encoding) {
3075 3076
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
3077
				extent_num_bytes = new_size -
3078
					found_key.offset + root->sectorsize - 1;
3079 3080
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
3081 3082 3083
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
3084
					   extent_num_bytes);
3085
				if (root->ref_cows && extent_start != 0)
3086
					inode_sub_bytes(inode, num_dec);
3087
				btrfs_mark_buffer_dirty(leaf);
C
Chris Mason 已提交
3088
			} else {
3089 3090 3091
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
3092 3093 3094
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

C
Chris Mason 已提交
3095
				/* FIXME blocksize != 4096 */
3096
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
C
Chris Mason 已提交
3097 3098
				if (extent_start != 0) {
					found_extent = 1;
3099
					if (root->ref_cows)
3100
						inode_sub_bytes(inode, num_dec);
3101
				}
C
Chris Mason 已提交
3102
			}
3103
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3104 3105 3106 3107 3108 3109 3110 3111
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3112 3113 3114
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
3115 3116
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
3117 3118 3119
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
3120
				ret = btrfs_truncate_item(trans, root, path,
3121 3122
							  size, 1);
			} else if (root->ref_cows) {
3123 3124
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
3125
			}
C
Chris Mason 已提交
3126
		}
3127
delete:
C
Chris Mason 已提交
3128
		if (del_item) {
3129 3130 3131 3132 3133 3134 3135 3136 3137 3138
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
3139
				BUG();
3140
			}
C
Chris Mason 已提交
3141 3142 3143
		} else {
			break;
		}
3144 3145
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
3146
			btrfs_set_path_blocking(path);
C
Chris Mason 已提交
3147
			ret = btrfs_free_extent(trans, root, extent_start,
3148 3149
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
3150
						ino, extent_offset);
C
Chris Mason 已提交
3151 3152
			BUG_ON(ret);
		}
3153

3154 3155 3156 3157 3158
		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
3159 3160 3161
			if (root->ref_cows &&
			    BTRFS_I(inode)->location.objectid !=
						BTRFS_FREE_INO_OBJECTID) {
3162 3163 3164 3165 3166 3167 3168 3169 3170 3171
				err = -EAGAIN;
				goto out;
			}
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				BUG_ON(ret);
				pending_del_nr = 0;
			}
3172
			btrfs_release_path(path);
3173
			goto search_again;
3174 3175
		} else {
			path->slots[0]--;
3176
		}
C
Chris Mason 已提交
3177
	}
3178
out:
3179 3180 3181
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
3182
		BUG_ON(ret);
3183
	}
C
Chris Mason 已提交
3184
	btrfs_free_path(path);
3185
	return err;
C
Chris Mason 已提交
3186 3187 3188 3189 3190 3191 3192 3193 3194
}

/*
 * taken from block_truncate_page, but does cow as it zeros out
 * any bytes left in the last page in the file.
 *
 * Zeroes the tail of the page containing 'from' so a shrinking truncate
 * does not expose stale data past the new EOF.  Reserves delalloc space,
 * waits out any ordered extent covering the page, marks the range
 * delalloc and dirties the page.  Returns 0 or a negative errno.
 */
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	int ret = 0;
	u64 page_start;
	u64 page_end;

	/* already block aligned: nothing to zero */
	if ((offset & (blocksize - 1)) == 0)
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

	ret = -ENOMEM;
again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			/* page was truncated out from under us; retry */
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	/* if an ordered extent covers this page, wait for it and retry */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/* clear stale state so set_extent_delalloc accounts correctly */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	ret = 0;
	if (offset != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

3289 3290 3291 3292 3293 3294
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
3295
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
C
Chris Mason 已提交
3296
{
3297 3298 3299
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3300
	struct extent_map *em = NULL;
3301
	struct extent_state *cached_state = NULL;
3302
	u64 mask = root->sectorsize - 1;
3303
	u64 hole_start = (oldsize + mask) & ~mask;
3304 3305 3306 3307
	u64 block_end = (size + mask) & ~mask;
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
3308
	int err = 0;
C
Chris Mason 已提交
3309

3310 3311 3312 3313 3314 3315 3316
	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
3317 3318
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state, GFP_NOFS);
3319 3320 3321
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
3322 3323
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
3324 3325
		btrfs_put_ordered_extent(ordered);
	}
C
Chris Mason 已提交
3326

3327 3328 3329 3330
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				block_end - cur_offset, 0);
3331
		BUG_ON(IS_ERR_OR_NULL(em));
3332 3333
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
3334
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3335
			u64 hint_byte = 0;
3336
			hole_size = last_byte - cur_offset;
3337

3338 3339 3340
			trans = btrfs_start_transaction(root, 2);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
3341
				break;
3342
			}
3343 3344 3345 3346

			err = btrfs_drop_extents(trans, inode, cur_offset,
						 cur_offset + hole_size,
						 &hint_byte, 1);
3347 3348
			if (err) {
				btrfs_end_transaction(trans, root);
3349
				break;
3350
			}
3351

3352
			err = btrfs_insert_file_extent(trans, root,
3353
					btrfs_ino(inode), cur_offset, 0,
3354 3355
					0, hole_size, 0, hole_size,
					0, 0, 0);
3356 3357
			if (err) {
				btrfs_end_transaction(trans, root);
3358
				break;
3359
			}
3360

3361 3362
			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);
3363 3364

			btrfs_end_transaction(trans, root);
3365 3366
		}
		free_extent_map(em);
3367
		em = NULL;
3368
		cur_offset = last_byte;
3369
		if (cur_offset >= block_end)
3370 3371
			break;
	}
3372

3373
	free_extent_map(em);
3374 3375
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
3376 3377
	return err;
}
C
Chris Mason 已提交
3378

3379
/*
 * Change the size of an inode.  Growing the file inserts hole extents via
 * btrfs_cont_expand(); shrinking goes through btrfs_truncate().  On a
 * failed expand the size change is rolled back.  Returns 0 or a negative
 * errno.
 */
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);
	int ret;

	if (newsize == oldsize)
		return 0;

	if (newsize > oldsize) {
		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		truncate_pagecache(inode, oldsize, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret) {
			/* undo the size bump on failure */
			btrfs_setsize(inode, oldsize);
			return ret;
		}

		ret = btrfs_dirty_inode(inode);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			BTRFS_I(inode)->ordered_data_close = 1;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);
		ret = btrfs_truncate(inode);
	}

	return ret;
}

3416 3417 3418
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
3419
	struct btrfs_root *root = BTRFS_I(inode)->root;
3420
	int err;
C
Chris Mason 已提交
3421

3422 3423 3424
	if (btrfs_root_readonly(root))
		return -EROFS;

3425 3426 3427
	err = inode_change_ok(inode, attr);
	if (err)
		return err;
C
Chris Mason 已提交
3428

3429
	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3430
		err = btrfs_setsize(inode, attr->ia_size);
3431 3432
		if (err)
			return err;
C
Chris Mason 已提交
3433
	}
3434

C
Christoph Hellwig 已提交
3435 3436
	if (attr->ia_valid) {
		setattr_copy(inode, attr);
3437
		err = btrfs_dirty_inode(inode);
C
Christoph Hellwig 已提交
3438

3439
		if (!err && attr->ia_valid & ATTR_MODE)
C
Christoph Hellwig 已提交
3440 3441
			err = btrfs_acl_chmod(inode);
	}
J
Josef Bacik 已提交
3442

C
Chris Mason 已提交
3443 3444
	return err;
}
3445

3446
void btrfs_evict_inode(struct inode *inode)
C
Chris Mason 已提交
3447 3448 3449
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
3450
	struct btrfs_block_rsv *rsv, *global_rsv;
3451
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
3452
	unsigned long nr;
C
Chris Mason 已提交
3453 3454
	int ret;

3455 3456
	trace_btrfs_inode_evict(inode);

C
Chris Mason 已提交
3457
	truncate_inode_pages(&inode->i_data, 0);
3458
	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3459
			       btrfs_is_free_space_inode(root, inode)))
3460 3461
		goto no_delete;

C
Chris Mason 已提交
3462
	if (is_bad_inode(inode)) {
3463
		btrfs_orphan_del(NULL, inode);
C
Chris Mason 已提交
3464 3465
		goto no_delete;
	}
3466
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
C
Chris Mason 已提交
3467
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3468

3469 3470 3471 3472 3473
	if (root->fs_info->log_root_recovering) {
		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
		goto no_delete;
	}

3474 3475 3476 3477 3478
	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
		goto no_delete;
	}

3479 3480 3481 3482 3483
	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
3484
	rsv->size = min_size;
3485
	global_rsv = &root->fs_info->global_block_rsv;
3486

3487
	btrfs_i_size_write(inode, 0);
3488

3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499
	/*
	 * This is a bit simpler than btrfs_truncate since
	 *
	 * 1) We've already reserved our space for our orphan item in the
	 *    unlink.
	 * 2) We're going to delete the inode item, so we don't need to update
	 *    it at all.
	 *
	 * So we just need to reserve some slack space in case we add bytes when
	 * doing the truncate.
	 */
3500
	while (1) {
3501
		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3502 3503 3504 3505 3506 3507 3508 3509 3510

		/*
		 * Try and steal from the global reserve since we will
		 * likely not use this space anyway, we want to try as
		 * hard as possible to get this to work.
		 */
		if (ret)
			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);

3511
		if (ret) {
3512
			printk(KERN_WARNING "Could not get space for a "
3513
			       "delete, will truncate on mount %d\n", ret);
3514 3515 3516 3517 3518 3519 3520 3521 3522 3523
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
3524
		}
3525

3526 3527
		trans->block_rsv = rsv;

3528
		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3529 3530
		if (ret != -EAGAIN)
			break;
3531

3532 3533 3534 3535 3536
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}
3537

3538 3539
	btrfs_free_block_rsv(root, rsv);

3540
	if (ret == 0) {
3541
		trans->block_rsv = root->orphan_block_rsv;
3542 3543 3544
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}
3545

3546
	trans->block_rsv = &root->fs_info->trans_block_rsv;
3547 3548
	if (!(root == root->fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3549
		btrfs_return_ino(root, btrfs_ino(inode));
3550

3551
	nr = trans->blocks_used;
3552
	btrfs_end_transaction(trans, root);
3553
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
3554
no_delete:
3555
	end_writeback(inode);
3556
	return;
C
Chris Mason 已提交
3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
3571
	int ret = 0;
C
Chris Mason 已提交
3572 3573

	path = btrfs_alloc_path();
3574 3575
	if (!path)
		return -ENOMEM;
3576

3577
	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
C
Chris Mason 已提交
3578
				    namelen, 0);
3579 3580
	if (IS_ERR(di))
		ret = PTR_ERR(di);
3581

3582
	if (IS_ERR_OR_NULL(di))
3583
		goto out_err;
3584

3585
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
C
Chris Mason 已提交
3586 3587 3588
out:
	btrfs_free_path(path);
	return ret;
3589 3590 3591
out_err:
	location->objectid = 0;
	goto out;
C
Chris Mason 已提交
3592 3593 3594 3595 3596 3597 3598 3599
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_root *root,
3600 3601 3602 3603
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
C
Chris Mason 已提交
3604
{
3605 3606 3607 3608 3609 3610
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;
C
Chris Mason 已提交
3611

3612 3613 3614 3615 3616
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}
C
Chris Mason 已提交
3617

3618 3619 3620 3621 3622 3623 3624 3625 3626
	err = -ENOENT;
	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
				  BTRFS_I(dir)->root->root_key.objectid,
				  location->objectid);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}
C
Chris Mason 已提交
3627

3628 3629
	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3630
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3631 3632
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;
C
Chris Mason 已提交
3633

3634 3635 3636 3637 3638 3639
	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

3640
	btrfs_release_path(path);
3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	if (btrfs_root_refs(&new_root->root_item) == 0) {
		err = -ENOENT;
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
C
Chris Mason 已提交
3661 3662
}

3663 3664 3665 3666
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
3667 3668
	struct rb_node **p;
	struct rb_node *parent;
3669
	u64 ino = btrfs_ino(inode);
3670 3671 3672
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;
3673

A
Al Viro 已提交
3674
	if (inode_unhashed(inode))
3675 3676
		return;

3677 3678 3679 3680 3681
	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

3682
		if (ino < btrfs_ino(&entry->vfs_inode))
3683
			p = &parent->rb_left;
3684
		else if (ino > btrfs_ino(&entry->vfs_inode))
3685
			p = &parent->rb_right;
3686 3687
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
3688
				  (I_WILL_FREE | I_FREEING)));
3689 3690 3691 3692
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
3693 3694 3695 3696 3697 3698 3699 3700 3701 3702
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
3703
	int empty = 0;
3704

3705
	spin_lock(&root->inode_lock);
3706 3707 3708
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3709
		empty = RB_EMPTY_ROOT(&root->inode_tree);
3710
	}
3711
	spin_unlock(&root->inode_lock);
3712

3713 3714 3715 3716 3717 3718 3719 3720
	/*
	 * Free space cache has inodes in the tree root, but the tree root has a
	 * root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

int btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

3748
		if (objectid < btrfs_ino(&entry->vfs_inode))
3749
			node = node->rb_left;
3750
		else if (objectid > btrfs_ino(&entry->vfs_inode))
3751 3752 3753 3754 3755 3756 3757
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
3758
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
3759 3760 3761 3762 3763 3764 3765 3766
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
3767
		objectid = btrfs_ino(&entry->vfs_inode) + 1;
3768 3769 3770 3771 3772 3773
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
3774
			 * btrfs_drop_inode will have it removed from
3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return 0;
3791 3792
}

3793 3794 3795 3796 3797
/*
 * iget5_locked() init callback: stamp a freshly allocated inode with the
 * ino and root from the lookup args.  Always returns 0.
 */
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	btrfs_set_inode_space_info(args->root, inode);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
3805
	return args->ino == btrfs_ino(inode) &&
3806
		args->root == BTRFS_I(inode)->root;
C
Chris Mason 已提交
3807 3808
}

3809 3810 3811
/*
 * Look up or allocate the in-core inode for (objectid, root) via the
 * inode cache.  Returns the inode (possibly I_NEW and locked) or NULL on
 * allocation failure.
 */
static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, objectid, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}

B
Balaji Rao 已提交
3824 3825 3826 3827
/* Get an inode object given its location and corresponding root.
 * Returns in *is_new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3828
			 struct btrfs_root *root, int *new)
B
Balaji Rao 已提交
3829 3830 3831 3832 3833
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
3834
		return ERR_PTR(-ENOMEM);
B
Balaji Rao 已提交
3835 3836 3837 3838 3839

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
3840 3841 3842 3843 3844 3845
		if (!is_bad_inode(inode)) {
			inode_tree_add(inode);
			unlock_new_inode(inode);
			if (new)
				*new = 1;
		} else {
3846 3847 3848
			unlock_new_inode(inode);
			iput(inode);
			inode = ERR_PTR(-ESTALE);
3849 3850 3851
		}
	}

B
Balaji Rao 已提交
3852 3853 3854
	return inode;
}

3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876
/*
 * Build an in-memory-only placeholder directory inode for a subvolume whose
 * root ref is gone (e.g. a deleted snapshot still reachable by name).
 * Returns the inode or ERR_PTR(-ENOMEM).
 */
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	BTRFS_I(inode)->dummy_inode = 1;

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}

3877
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
C
Chris Mason 已提交
3878
{
3879
	struct inode *inode;
3880
	struct btrfs_root *root = BTRFS_I(dir)->root;
C
Chris Mason 已提交
3881 3882
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
3883
	int index;
3884
	int ret = 0;
C
Chris Mason 已提交
3885 3886 3887

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);
3888

3889 3890 3891 3892
	if (unlikely(d_need_lookup(dentry))) {
		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
		kfree(dentry->d_fsdata);
		dentry->d_fsdata = NULL;
3893 3894
		/* This thing is hashed, drop it for now */
		d_drop(dentry);
3895 3896 3897
	} else {
		ret = btrfs_inode_by_name(dir, dentry, &location);
	}
3898

C
Chris Mason 已提交
3899 3900
	if (ret < 0)
		return ERR_PTR(ret);
3901

3902 3903 3904 3905
	if (location.objectid == 0)
		return NULL;

	if (location.type == BTRFS_INODE_ITEM_KEY) {
3906
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
3907 3908 3909 3910 3911
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

3912
	index = srcu_read_lock(&root->fs_info->subvol_srcu);
3913 3914 3915 3916 3917 3918 3919 3920
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
3921
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
C
Chris Mason 已提交
3922
	}
3923 3924
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

3925
	if (!IS_ERR(inode) && root != sub_root) {
3926 3927
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
3928
			ret = btrfs_orphan_cleanup(sub_root);
3929
		up_read(&root->fs_info->cleanup_work_sem);
3930 3931
		if (ret)
			inode = ERR_PTR(ret);
3932 3933
	}

3934 3935 3936
	return inode;
}

3937
static int btrfs_dentry_delete(const struct dentry *dentry)
3938 3939 3940
{
	struct btrfs_root *root;

3941 3942
	if (!dentry->d_inode && !IS_ROOT(dentry))
		dentry = dentry->d_parent;
3943

3944 3945 3946 3947 3948
	if (dentry->d_inode) {
		root = BTRFS_I(dentry->d_inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;
	}
3949 3950 3951
	return 0;
}

3952 3953 3954 3955 3956 3957
/*
 * Free the btrfs_key readdir may have stashed in d_fsdata.
 * kfree(NULL) is a no-op, so no NULL check is needed.
 */
static void btrfs_dentry_release(struct dentry *dentry)
{
	kfree(dentry->d_fsdata);
}

3958 3959 3960
/*
 * VFS ->lookup: resolve the name and splice the inode in.  If readdir
 * pre-created this dentry with DCACHE_NEED_LOOKUP, the real lookup has
 * now happened, so clear the flag.
 */
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct dentry *result;

	result = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
	if (unlikely(d_need_lookup(dentry))) {
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
		spin_unlock(&dentry->d_lock);
	}

	return result;
}

3972
unsigned char btrfs_filetype_table[] = {
C
Chris Mason 已提交
3973 3974 3975
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};

3976 3977
/*
 * Fill in directory entries for readdir(3).
 *
 * Walks DIR_INDEX (or DIR_ITEM for the tree root) keys starting at
 * filp->f_pos, merges in not-yet-committed delayed dir-index items, and
 * pre-hashes a DCACHE_NEED_LOOKUP dentry per name so a later lookup can
 * reuse the btrfs_key stashed in d_fsdata instead of searching again.
 *
 * Fix vs. previous version: the d_alloc() failure path called dput(tmp)
 * with tmp known to be NULL — a pointless call; it is removed.
 */
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct list_head ins_list;
	struct list_head del_list;
	struct qstr q;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];
	char *name_ptr;
	int name_len;
	int is_curr = 0;	/* filp->f_pos points to the current index? */

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       filp->f_pos, btrfs_ino(inode), DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
		u64 pino = parent_ino(filp->f_path.dentry);
		over = filldir(dirent, "..", 2,
			       filp->f_pos, pino, DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 2;
	}
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;

	if (key_type == BTRFS_DIR_INDEX_KEY) {
		INIT_LIST_HEAD(&ins_list);
		INIT_LIST_HEAD(&del_list);
		btrfs_get_delayed_items(inode, &ins_list, &del_list);
	}

	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
	key.objectid = btrfs_ino(inode);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < filp->f_pos)
			goto next;
		if (key_type == BTRFS_DIR_INDEX_KEY &&
		    btrfs_should_delete_dir_index(&del_list,
						  found_key.offset))
			goto next;

		filp->f_pos = found_key.offset;
		is_curr = 1;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);

		/* a DIR_ITEM item can hold several dir entries */
		while (di_cur < di_total) {
			struct btrfs_key location;
			struct dentry *tmp;

			if (verify_dir_item(root, leaf, di))
				break;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len <= sizeof(tmp_name)) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);

			/*
			 * Pre-create a hashed dentry carrying the key so a
			 * future lookup can skip the dir search (best effort;
			 * allocation failures just fall through).
			 */
			q.name = name_ptr;
			q.len = name_len;
			q.hash = full_name_hash(q.name, q.len);
			tmp = d_lookup(filp->f_dentry, &q);
			if (!tmp) {
				struct btrfs_key *newkey;

				newkey = kzalloc(sizeof(struct btrfs_key),
						 GFP_NOFS);
				if (!newkey)
					goto no_dentry;
				tmp = d_alloc(filp->f_dentry, &q);
				if (!tmp) {
					/* was: dput(tmp) on a NULL tmp */
					kfree(newkey);
					goto no_dentry;
				}
				memcpy(newkey, &location,
				       sizeof(struct btrfs_key));
				tmp->d_fsdata = newkey;
				tmp->d_flags |= DCACHE_NEED_LOOKUP;
				d_rehash(tmp);
				dput(tmp);
			} else {
				dput(tmp);
			}
no_dentry:
			/* is this a reference to our own snapshot? If so
			 * skip it
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset, location.objectid,
				       d_type);

skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
next:
		path->slots[0]++;
	}

	if (key_type == BTRFS_DIR_INDEX_KEY) {
		if (is_curr)
			filp->f_pos++;
		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
						      &ins_list);
		if (ret)
			goto nopos;
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	if (key_type == BTRFS_DIR_INDEX_KEY)
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	if (key_type == BTRFS_DIR_INDEX_KEY)
		btrfs_put_delayed_items(&ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}

4181
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
C
Chris Mason 已提交
4182 4183 4184 4185
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;
4186
	bool nolock = false;
C
Chris Mason 已提交
4187

4188
	if (BTRFS_I(inode)->dummy_inode)
4189 4190
		return 0;

4191
	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
4192
		nolock = true;
4193

4194
	if (wbc->sync_mode == WB_SYNC_ALL) {
4195
		if (nolock)
4196
			trans = btrfs_join_transaction_nolock(root);
4197
		else
4198
			trans = btrfs_join_transaction(root);
4199 4200
		if (IS_ERR(trans))
			return PTR_ERR(trans);
4201 4202 4203 4204
		if (nolock)
			ret = btrfs_end_transaction_nolock(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
C
Chris Mason 已提交
4205 4206 4207 4208 4209
	}
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
4215
int btrfs_dirty_inode(struct inode *inode)
C
Chris Mason 已提交
4216 4217 4218
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
4219 4220 4221
	int ret;

	if (BTRFS_I(inode)->dummy_inode)
4222
		return 0;
C
Chris Mason 已提交
4223

4224
	trans = btrfs_join_transaction(root);
4225 4226
	if (IS_ERR(trans))
		return PTR_ERR(trans);
4227 4228

	ret = btrfs_update_inode(trans, root, inode);
4229 4230 4231 4232
	if (ret && ret == -ENOSPC) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
4233 4234
		if (IS_ERR(trans))
			return PTR_ERR(trans);
4235

4236 4237
		ret = btrfs_update_inode(trans, root, inode);
	}
C
Chris Mason 已提交
4238
	btrfs_end_transaction(trans, root);
4239 4240
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(root);
4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288

	return ret;
}

/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
/*
 * This is a copy of file_update_time that returns an error when updating
 * the inode fails (e.g. ENOSPC), which matters for write and mmap-write
 * paths.  A failed mnt_want_write_file() silently skips the update, same
 * as file_update_time.
 */
int btrfs_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
	struct timespec now;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (mnt_want_write_file(file))
		return 0;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	ret = btrfs_dirty_inode(inode);
	if (!ret)
		mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
	return ret;
}

4291 4292 4293 4294 4295
/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
4296 4297 4298 4299 4300 4301 4302 4303
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

4304
	key.objectid = btrfs_ino(inode);
4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

4336
	if (found_key.objectid != btrfs_ino(inode) ||
4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

4348 4349 4350 4351
/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
4352
/*
 * Hand out the next free dir-index sequence number for 'dir' in *index.
 * Lazily initializes index_cnt from the delayed-items code, falling back
 * to an on-disk scan (btrfs_set_inode_index_count).
 */
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		/* counter not initialized yet */
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = BTRFS_I(dir)->index_cnt;
	BTRFS_I(dir)->index_cnt++;

	return ret;
}

C
Chris Mason 已提交
4371 4372
/*
 * Allocate a new in-core inode and insert its INODE_ITEM plus INODE_REF
 * (back reference to the parent dir) into the fs tree in one go.
 *
 * @trans:        running transaction
 * @root:         subvolume root the inode lives in
 * @dir:          parent directory (may be NULL for subvol roots)
 * @name/name_len: name used for the inode ref
 * @ref_objectid: objectid the ref points at (usually the parent dir)
 * @objectid:     inode number to use
 * @mode:         file mode
 * @index:        out: dir index sequence number assigned from @dir
 *
 * Returns the new inode or an ERR_PTR.  On failure after an index was
 * handed out, the parent's index_cnt is rolled back.
 *
 * Fix vs. previous version: dropped the dead local 'owner', which was
 * assigned from S_ISDIR(mode) but never read.
 */
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid, int mode,
				     u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;
	btrfs_set_inode_space_info(root, inode);

	/* insert the inode item and its back reference in one batch */
	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	return inode;
fail:
	/* give back the dir index we consumed */
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}

/* translate the inode's S_IFMT bits into a BTRFS_FT_* dir entry type */
static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

4499 4500 4501 4502 4503 4504
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a give name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
4505 4506 4507
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);
	struct btrfs_key key;
	int ret = 0;

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* linking a subvolume root: the dir item points at the
		 * subvol's root key and the backref lives in the tree root */
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, index, name, name_len);
	} else {
		key.objectid = ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
		if (add_backref)
			ret = btrfs_insert_inode_ref(trans, root, name,
						     name_len, ino,
						     parent_ino, index);
	}

	if (ret == 0) {
		ret = btrfs_insert_dir_item(trans, root, name, name_len,
					    parent_inode, &key,
					    btrfs_inode_type(inode), index);
		BUG_ON(ret);

		/* each name is counted twice in i_size (dir item + index) */
		btrfs_i_size_write(parent_inode, parent_inode->i_size +
				   name_len * 2);
		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
		ret = btrfs_update_inode(trans, root, parent_inode);
	}
	return ret;
}

/*
 * Link a non-directory inode into 'dir' under 'dentry' and instantiate
 * the dentry on success.  A positive return from btrfs_add_link means
 * the name already existed, mapped to -EEXIST.
 */
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
{
	int err;

	err = btrfs_add_link(trans, dir, inode,
			     dentry->d_name.name, dentry->d_name.len,
			     backref, index);
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	return err > 0 ? -EEXIST : err;
}

J
Josef Bacik 已提交
4562 4563 4564 4565 4566
/*
 * VFS ->mknod: create a special file (device node, fifo, socket).
 */
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
			int mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	unsigned long nr = 0;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int err;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err) {
		drop_inode = 1;
	} else {
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		btrfs_update_inode(trans, root, inode);
	}
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}

C
Chris Mason 已提交
4623 4624 4625 4626 4627
static int btrfs_create(struct inode *dir, struct dentry *dentry,
			int mode, struct nameidata *nd)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
4628
	struct inode *inode = NULL;
C
Chris Mason 已提交
4629
	int drop_inode = 0;
4630
	int err;
4631
	unsigned long nr = 0;
C
Chris Mason 已提交
4632
	u64 objectid;
4633
	u64 index = 0;
C
Chris Mason 已提交
4634

4635 4636 4637 4638 4639
	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
4640 4641 4642
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
4643

4644 4645 4646 4647
	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

4648
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4649
				dentry->d_name.len, btrfs_ino(dir), objectid,
4650
				mode, &index);
4651 4652
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
C
Chris Mason 已提交
4653
		goto out_unlock;
4654
	}
C
Chris Mason 已提交
4655

4656
	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
J
Josef Bacik 已提交
4657 4658 4659 4660 4661
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

4662
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
C
Chris Mason 已提交
4663 4664 4665 4666
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
4667
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
C
Chris Mason 已提交
4668 4669
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
4670
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
C
Chris Mason 已提交
4671 4672
	}
out_unlock:
4673
	nr = trans->blocks_used;
4674
	btrfs_end_transaction_throttle(trans, root);
C
Chris Mason 已提交
4675 4676 4677 4678
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
4679
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
4680 4681 4682 4683 4684 4685 4686 4687 4688
	return err;
}

static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = old_dentry->d_inode;
4689
	u64 index;
4690
	unsigned long nr = 0;
C
Chris Mason 已提交
4691 4692 4693
	int err;
	int drop_inode = 0;

4694 4695
	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
4696
		return -EXDEV;
4697

4698 4699
	if (inode->i_nlink == ~0U)
		return -EMLINK;
4700

4701
	err = btrfs_set_inode_index(dir, &index);
4702 4703 4704
	if (err)
		goto fail;

4705
	/*
4706
	 * 2 items for inode and inode ref
4707
	 * 2 items for dir items
4708
	 * 1 item for parent inode
4709
	 */
4710
	trans = btrfs_start_transaction(root, 5);
4711 4712 4713 4714
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto fail;
	}
4715

4716 4717
	btrfs_inc_nlink(inode);
	inode->i_ctime = CURRENT_TIME;
A
Al Viro 已提交
4718
	ihold(inode);
4719

4720
	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
4721

4722
	if (err) {
4723
		drop_inode = 1;
4724
	} else {
4725
		struct dentry *parent = dentry->d_parent;
4726 4727
		err = btrfs_update_inode(trans, root, inode);
		BUG_ON(err);
4728
		btrfs_log_new_name(trans, inode, NULL, parent);
4729
	}
C
Chris Mason 已提交
4730

4731
	nr = trans->blocks_used;
4732
	btrfs_end_transaction_throttle(trans, root);
4733
fail:
C
Chris Mason 已提交
4734 4735 4736 4737
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
4738
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
4739 4740 4741 4742 4743
	return err;
}

static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
4744
	struct inode *inode = NULL;
C
Chris Mason 已提交
4745 4746 4747 4748
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
4749
	u64 objectid = 0;
4750
	u64 index = 0;
4751
	unsigned long nr = 1;
C
Chris Mason 已提交
4752

4753 4754 4755 4756 4757
	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
4758 4759 4760
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
C
Chris Mason 已提交
4761

4762 4763 4764 4765
	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

4766
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4767
				dentry->d_name.len, btrfs_ino(dir), objectid,
4768
				S_IFDIR | mode, &index);
C
Chris Mason 已提交
4769 4770 4771 4772
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}
4773

C
Chris Mason 已提交
4774
	drop_on_err = 1;
J
Josef Bacik 已提交
4775

4776
	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
J
Josef Bacik 已提交
4777 4778 4779
	if (err)
		goto out_fail;

C
Chris Mason 已提交
4780 4781 4782
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

4783
	btrfs_i_size_write(inode, 0);
C
Chris Mason 已提交
4784 4785 4786
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;
4787

4788 4789
	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
C
Chris Mason 已提交
4790 4791
	if (err)
		goto out_fail;
4792

C
Chris Mason 已提交
4793 4794 4795 4796
	d_instantiate(dentry, inode);
	drop_on_err = 0;

out_fail:
4797
	nr = trans->blocks_used;
4798
	btrfs_end_transaction_throttle(trans, root);
C
Chris Mason 已提交
4799 4800
	if (drop_on_err)
		iput(inode);
4801
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
4802 4803 4804
	return err;
}

4805 4806 4807 4808
/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
4809 4810
/*
 * Trim 'em' so it starts at map_start with length map_len, then insert it
 * into the extent map tree.  For real (non-compressed) extents the disk
 * block start is advanced by the same amount the logical start moved.
 * NOTE: the 'existing' parameter is currently unused; kept for the
 * established call signature.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		/* compressed extents keep block_start/len for the whole
		 * compressed chunk, so only adjust uncompressed ones */
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em);
}

4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838
/*
 * Decompress an inline (in-leaf) file extent into 'page'.  The compressed
 * bytes are copied out of the leaf into a temporary buffer first.  If
 * decompression comes up short, the remainder of the page range is
 * zero-filled and the call still returns success.
 */
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	struct extent_buffer *leaf = path->nodes[0];
	unsigned long inline_size;
	unsigned long ptr;
	size_t max_size;
	int compress_type;
	char *tmp;
	int ret;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		/* short/failed decompress: zero the rest of the range */
		char *kaddr = kmap_atomic(page, KM_USER0);
		unsigned long copy_size = min_t(u64,
				  PAGE_CACHE_SIZE - pg_offset,
				  max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	kfree(tmp);
	return 0;
}

4868 4869
/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */
4876

4877
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4878
				    size_t pg_offset, u64 start, u64 len,
4879 4880 4881 4882
				    int create)
{
	int ret;
	int err = 0;
4883
	u64 bytenr;
4884 4885
	u64 extent_start = 0;
	u64 extent_end = 0;
4886
	u64 objectid = btrfs_ino(inode);
4887
	u32 found_type;
4888
	struct btrfs_path *path = NULL;
4889 4890
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
4891 4892
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
4893 4894
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4895
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4896
	struct btrfs_trans_handle *trans = NULL;
4897
	int compress_type;
4898 4899

again:
4900
	read_lock(&em_tree->lock);
4901
	em = lookup_extent_mapping(em_tree, start, len);
4902 4903
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
4904
	read_unlock(&em_tree->lock);
4905

4906
	if (em) {
4907 4908 4909
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
4910 4911 4912
			free_extent_map(em);
		else
			goto out;
4913
	}
4914
	em = alloc_extent_map();
4915
	if (!em) {
4916 4917
		err = -ENOMEM;
		goto out;
4918
	}
4919
	em->bdev = root->fs_info->fs_devices->latest_bdev;
4920
	em->start = EXTENT_MAP_HOLE;
4921
	em->orig_start = EXTENT_MAP_HOLE;
4922
	em->len = (u64)-1;
4923
	em->block_len = (u64)-1;
4924 4925 4926

	if (!path) {
		path = btrfs_alloc_path();
4927 4928 4929 4930 4931 4932 4933 4934 4935
		if (!path) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
		path->reada = 1;
4936 4937
	}

4938 4939
	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

4951 4952
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
4953 4954
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
4955 4956 4957
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
4958 4959 4960 4961
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

4962 4963
	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
4964
	compress_type = btrfs_file_extent_compression(leaf, item);
4965 4966
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4967
		extent_end = extent_start +
4968
		       btrfs_file_extent_num_bytes(leaf, item);
4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
4983
			}
4984 4985 4986
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
4987
		}
4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

4999 5000
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5001 5002
		em->start = extent_start;
		em->len = extent_end - extent_start;
5003 5004
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
5005 5006
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
5007
			em->block_start = EXTENT_MAP_HOLE;
5008 5009
			goto insert;
		}
5010
		if (compress_type != BTRFS_COMPRESS_NONE) {
5011
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5012
			em->compress_type = compress_type;
5013 5014 5015 5016 5017 5018 5019
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
									 item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
5020 5021
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5022
		}
5023 5024
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5025
		unsigned long ptr;
5026
		char *map;
5027 5028 5029
		size_t size;
		size_t extent_offset;
		size_t copy_size;
5030

5031
		em->block_start = EXTENT_MAP_INLINE;
5032
		if (!page || create) {
5033
			em->start = extent_start;
5034
			em->len = extent_end - extent_start;
5035 5036
			goto out;
		}
5037

5038 5039
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
5040
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5041 5042
				size - extent_offset);
		em->start = extent_start + extent_offset;
5043 5044
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
5045
		em->orig_start = EXTENT_MAP_INLINE;
5046
		if (compress_type) {
5047
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5048 5049
			em->compress_type = compress_type;
		}
5050
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5051
		if (create == 0 && !PageUptodate(page)) {
5052 5053
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
5054 5055 5056 5057 5058 5059 5060 5061
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret);
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
5062 5063 5064 5065 5066
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
5067 5068
				kunmap(page);
			}
5069 5070
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
5071
			WARN_ON(1);
5072 5073 5074 5075
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;
C
Chris Mason 已提交
5076

5077
				btrfs_release_path(path);
5078
				trans = btrfs_join_transaction(root);
C
Chris Mason 已提交
5079

5080 5081
				if (IS_ERR(trans))
					return ERR_CAST(trans);
5082 5083
				goto again;
			}
5084
			map = kmap(page);
5085
			write_extent_buffer(leaf, map + pg_offset, ptr,
5086
					    copy_size);
5087
			kunmap(page);
5088
			btrfs_mark_buffer_dirty(leaf);
5089
		}
5090
		set_extent_uptodate(io_tree, em->start,
5091
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
5092 5093
		goto insert;
	} else {
5094
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
5095 5096 5097 5098
		WARN_ON(1);
	}
not_found:
	em->start = start;
5099
	em->len = len;
5100
not_found_em:
5101
	em->block_start = EXTENT_MAP_HOLE;
5102
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5103
insert:
5104
	btrfs_release_path(path);
5105
	if (em->start > start || extent_map_end(em) <= start) {
5106 5107 5108 5109 5110
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
5111 5112 5113
		err = -EIO;
		goto out;
	}
5114 5115

	err = 0;
5116
	write_lock(&em_tree->lock);
5117
	ret = add_extent_mapping(em_tree, em);
5118 5119 5120 5121
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
5122
	if (ret == -EEXIST) {
5123
		struct extent_map *existing;
5124 5125 5126

		ret = 0;

5127
		existing = lookup_extent_mapping(em_tree, start, len);
5128 5129 5130 5131 5132
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
5133 5134 5135 5136 5137
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
5138 5139
							   em, start,
							   root->sectorsize);
5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
5153
			err = 0;
5154 5155
		}
	}
5156
	write_unlock(&em_tree->lock);
5157
out:
5158 5159 5160

	trace_btrfs_get_extent(root, em);

5161 5162
	if (path)
		btrfs_free_path(path);
5163 5164
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
5165
		if (!err)
5166 5167 5168 5169 5170 5171 5172 5173 5174
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236
/*
 * fiemap flavour of btrfs_get_extent(): when btrfs_get_extent() reports a
 * hole, look for dirty-but-unwritten (delalloc) ranges hiding behind it and
 * synthesize an EXTENT_MAP_DELALLOC mapping for them, so fiemap can report
 * not-yet-allocated data.  Returns an extent_map, an ERR_PTR, or NULL.
 */
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to a hole, there might
		 * actually be delalloc bytes behind it
		 */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start,range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until  the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			/* delalloc starts at (or before) our range */
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	/* free_extent_map() tolerates NULL, hole_em may be gone already */
	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

5297
/*
 * Allocate a fresh data extent for a direct IO write over [start, start+len)
 * and return an extent_map describing it (or an ERR_PTR).
 *
 * @em is the mapping btrfs_get_blocks_direct() looked up: if it is exactly
 * the hole we are filling it is reused in place; otherwise it is freed, the
 * cached range dropped, and a new mapping is inserted into the tree.
 * Consumes the caller's reference on @em either way.
 */
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  struct extent_map *em,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;
	bool insert = false;

	/*
	 * Ok if the extent map we looked up is a hole and is for the exact
	 * range we want, there is no reason to allocate a new one, however if
	 * it is not right then we need to free this one and drop the cache for
	 * our range.
	 */
	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
	    em->len != len) {
		free_extent_map(em);
		em = NULL;
		insert = true;
		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	/* small writes near the front of the file are defrag candidates */
	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, (u64)-1, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	if (!em) {
		em = alloc_extent_map();
		if (!em) {
			em = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;

	/*
	 * We need to do this because if we're using the original em we searched
	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
	 */
	em->flags = 0;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* retry insertion until no stale overlapping mapping is in the way */
	while (insert) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}

5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406
/*
 * Decide whether a direct IO write over [offset, offset+len) may safely
 * overwrite the existing extent in place (nocow).
 *
 * Returns 1 when nocow is safe, 0 when the range must be cow'd, < 0 on
 * error.  Nocow requires: a regular/prealloc extent fully covering the
 * range, writable space (not readonly), no other files referencing the
 * extent, and no checksums recorded for the target bytes.
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		/* no exact match: previous item may still cover offset */
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

5483 5484 5485 5486 5487 5488 5489
/*
 * get_block callback for btrfs direct IO: map @iblock (file block) to a
 * disk block in @bh_result.  For writes, either reuse an existing extent
 * in place (nocow/prealloc) or allocate a new one; for reads, just report
 * the existing mapping.  Falls back to buffered IO (-ENOTBLK) for inline
 * and compressed extents.
 */
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1, GFP_NOFS);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = em->len - (start - em->start);
		goto map;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		int ret;
		u64 block_start;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		/*
		 * we're not going to log anything, but we do need
		 * to make sure the current transaction stays open
		 * while we look for nocow cross refs
		 */
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			goto must_cow;

		if (can_nocow_odirect(trans, inode, start, len) == 1) {
			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			btrfs_end_transaction(trans, root);
			if (ret) {
				free_extent_map(em);
				return ret;
			}
			goto unlock;
		}
		btrfs_end_transaction(trans, root);
	}
must_cow:
	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	em = btrfs_new_extent_direct(inode, em, start, len);
	if (IS_ERR(em))
		return PTR_ERR(em);
	len = min(len, em->len - (start - em->start));
unlock:
	/* writes clear delalloc/dirty here; endio completes the ordered IO */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
			  0, NULL, GFP_NOFS);
map:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		set_buffer_new(bh_result);

	free_extent_map(em);

	return 0;
}

/*
 * Per-request state for a btrfs direct IO; shared by all the bios one dio
 * gets split into and freed by the endio handlers.
 */
struct btrfs_dio_private {
	struct inode *inode;
	u64 logical_offset;	/* file offset the dio starts at */
	u64 disk_bytenr;
	u64 bytes;		/* total length of the dio */
	u32 *csums;		/* csum array, one entry per bio_vec (reads) */
	void *private;		/* original bi_private, restored at endio */

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	/* the dio's original bio, completed when pending_bios hits zero */
	struct bio *orig_bio;
};

/*
 * Endio for direct IO reads: verify the data checksum of every bio_vec
 * against the csums looked up at submit time, unlock the file range, and
 * complete the original dio bio.  A csum mismatch turns err into -EIO and
 * clears BIO_UPTODATE.
 */
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start;
	u32 *private = dip->csums;

	start = dip->logical_offset;
	do {
		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			struct page *page = bvec->bv_page;
			char *kaddr;
			u32 csum = ~(u32)0;
			unsigned long flags;

			/* KM_IRQ0 mapping: disable irqs around the kmap */
			local_irq_save(flags);
			kaddr = kmap_atomic(page, KM_IRQ0);
			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
					       csum, bvec->bv_len);
			btrfs_csum_final(csum, (char *)&csum);
			kunmap_atomic(kaddr, KM_IRQ0);
			local_irq_restore(flags);

			flush_dcache_page(bvec->bv_page);
			if (csum != *private) {
				printk(KERN_ERR "btrfs csum failed ino %llu off"
				      " %llu csum %u private %u\n",
				      (unsigned long long)btrfs_ino(inode),
				      (unsigned long long)start,
				      csum, *private);
				err = -EIO;
			}
		}

		start += bvec->bv_len;
		private++;
		bvec++;
	} while (bvec <= bvec_end);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
	/* hand the bio back to its original owner */
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
5683 5684
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
5685 5686 5687 5688
	int ret;

	if (err)
		goto out_done;
5689 5690 5691 5692
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
5693
	if (!ret)
5694
		goto out_test;
5695 5696 5697

	BUG_ON(!ordered);

5698
	trans = btrfs_join_transaction(root);
5699
	if (IS_ERR(trans)) {
5700 5701 5702 5703 5704 5705 5706 5707
		err = -ENOMEM;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
5708
			err = btrfs_update_inode_fallback(trans, root, inode);
5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state, GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5744
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5745
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5746
		btrfs_update_inode_fallback(trans, root, inode);
5747
	ret = 0;
5748 5749 5750 5751 5752 5753 5754
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	btrfs_end_transaction(trans, root);
5755
	ordered_offset = ordered->file_offset + ordered->len;
5756 5757
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);
5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768

out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		goto again;
	}
5769 5770 5771 5772 5773
out_done:
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);
5774 5775 5776 5777

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
5778 5779 5780
	dio_end_io(bio, err);
}

5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791
/*
 * Async submit "start" hook for direct IO writes: compute the data
 * checksums for @bio starting at file @offset before it goes to disk.
 * rw/mirror_num/bio_flags are unused; they only satisfy the hook signature.
 */
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags, u64 offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err = btrfs_csum_one_bio(root, inode, bio, offset, 1);

	BUG_ON(err);
	return 0;
}

5792 5793 5794 5795 5796
/*
 * Endio for each split bio of a direct IO: record any error, and when the
 * last pending bio finishes, complete (or fail) the dio's original bio.
 */
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
		      "sector %#Lx len %u err no %d\n",
		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before atomic variable goto zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	bio_put(bio);
}

/*
 * Allocate a bio for direct IO starting at @first_sector, sized for as
 * many vecs as @bdev supports.
 */
static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	return btrfs_bio_alloc(bdev, first_sector, bio_get_nr_vecs(bdev),
			       gfp_flags);
}

/*
 * Submit one split bio of a direct IO.  Hooks up the endio workqueue, then
 * handles checksums: async writes defer csum generation to the workqueue,
 * sync writes csum inline, and reads look up the expected csums into
 * @csums for verification at endio.  skip_sum bypasses all of that.
 */
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
					 u32 *csums, int async_submit)
{
	int write = rw & REQ_WRITE;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	bio_get(bio);
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	if (ret)
		goto err;

	if (skip_sum)
		goto map;

	if (write && async_submit) {
		/* csums are generated by the wq start hook; the wq also maps */
		ret = btrfs_wq_submit_bio(root->fs_info,
				   inode, rw, bio, 0, 0,
				   file_offset,
				   __btrfs_submit_bio_start_direct_io,
				   __btrfs_submit_bio_done);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else if (!skip_sum) {
		/* read: !skip_sum always holds here (checked above) */
		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
					  file_offset, csums);
		if (ret)
			goto err;
	}

map:
	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
	/* drop the ref taken above; wq/endio hold their own */
	bio_put(bio);
	return ret;
}

/*
 * Split the original direct-IO bio into per-stripe bios and submit them.
 *
 * If the whole original bio maps to a single chunk (map_length covers it),
 * it is submitted as-is.  Otherwise we build new bios page by page,
 * submitting each one when the next page would cross a chunk boundary or
 * no longer fits in the bio.  dip->pending_bios is incremented before each
 * submission so the end_io handler cannot complete the dip underneath us.
 *
 * Errors after the first submission are reported through the dip
 * error/completion machinery, not the return value.
 */
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;		/* bytes accumulated in current bio */
	u64 map_length;
	int nr_pages = 0;		/* pages in current bio, for csum advance */
	u32 *csums = dip->csums;
	int ret = 0;
	int async_submit = 0;
	int write = rw & REQ_WRITE;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		bio_put(orig_bio);
		return -EIO;
	}

	/* Single chunk covers the whole bio: submit it unchanged. */
	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	async_submit = 1;
	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums, async_submit);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			/* Write's use the ordered csums */
			if (!write && !skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			/* Re-map from the new start sector for the next bio. */
			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages ++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums, async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before atomic variable goto zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}

/*
 * Entry point for submitting a direct-IO bio: build the btrfs_dio_private
 * tracking structure, pick the read/write end_io handler, and hand the bio
 * to btrfs_submit_direct_hook() for (possibly split) submission.
 *
 * On failure before/at submission we fall through to free_ordered, which
 * for writes releases the reserved extent backing the ordered extent that
 * the write path already created, then completes the bio with the error.
 */
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}
	dip->csums = NULL;

	/* Write's use the ordered csum stuff, so we don't need dip->csums */
	if (!write && !skip_sum) {
		/* One csum slot per bio_vec for the read-verify path. */
		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
		if (!dip->csums) {
			kfree(dip);
			ret = -ENOMEM;
			goto free_ordered;
		}
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	/* Total byte count of the bio, summed over its vecs. */
	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		/*
		 * NOTE(review): the ordered extent is expected to exist here
		 * because the DIO write path created it before submission;
		 * the lookup result is dereferenced without a NULL check.
		 */
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		/* One put for the lookup ref, one to drop the extent. */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}

/*
 * Validate a direct-IO request: the file offset and every iovec's base
 * address and length must be sector-aligned, and (for reads) no two iovecs
 * may share the same base address, otherwise we'll get csum errors when
 * the data is read back.
 *
 * Returns 0 when the request may proceed as direct IO, -EINVAL otherwise
 * (the caller then treats it as "fall back to buffered").
 */
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	int seg;
	int i;
	size_t size;
	unsigned long addr;
	unsigned blocksize_mask = root->sectorsize - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
/*
 * ->direct_IO for btrfs.
 *
 * Protocol: lock the extent range for the whole request, wait out any
 * ordered extents in that range, set EXTENT_DELALLOC for writes, and then
 * call __blockdev_direct_IO().  The extent lock is released piecewise by
 * the get_blocks/endio callbacks; the error paths here clear the lock (and
 * write bits) for whatever portion did not get IO.
 *
 * Returning 0 after a failed alignment check tells the generic code to
 * fall back to buffered IO.
 */
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	ssize_t ret;
	int writing = rw & WRITE;
	int write_bits = 0;
	size_t count = iov_length(iov, nr_segs);

	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
			    offset, nr_segs)) {
		return 0;
	}

	lockstart = offset;
	lockend = offset + count - 1;

	if (writing) {
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	}

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state, GFP_NOFS);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure theres no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		/* Drop the lock while we wait, then retry the lookup. */
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}

	/*
	 * we don't use btrfs_set_extent_delalloc because we don't want
	 * the dirty or uptodate bits
	 */
	if (writing) {
		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     EXTENT_DELALLOC, 0, NULL, &cached_state,
				     GFP_NOFS);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
					 lockend, EXTENT_LOCKED | write_bits,
					 1, 0, &cached_state, GFP_NOFS);
			goto out;
		}
	}

	free_extent_state(cached_state);
	cached_state = NULL;

	ret = __blockdev_direct_IO(rw, iocb, inode,
		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
		   btrfs_submit_direct, 0);

	if (ret < 0 && ret != -EIOCBQUEUED) {
		/* Nothing was submitted: unlock the whole range. */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
		/*
		 * We're falling back to buffered, unlock the section we didn't
		 * do IO on.
		 */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	}
out:
	free_extent_state(cached_state);
	return ret;
}

/* ->fiemap: map file extents via the generic extent_fiemap helper. */
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}

6197
int btrfs_readpage(struct file *file, struct page *page)
6198
{
6199 6200
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
6201
	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
6202
}
6203

/*
 * ->writepage: write one dirty page.  If we are already in memory reclaim
 * (PF_MEMALLOC), writing could deadlock or recurse, so just re-dirty the
 * page and let regular writeback handle it later.
 */
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;


	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}

6218 6219
int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
6220
{
6221
	struct extent_io_tree *tree;
6222

6223
	tree = &BTRFS_I(mapping->host)->io_tree;
6224 6225 6226
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

6227 6228 6229 6230
static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
6231 6232
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
6233 6234 6235
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
/*
 * Try to release a page: drop any extent mappings/state for it, and if
 * that succeeds (ret == 1) detach the page's private data and drop the
 * reference that PagePrivate held.
 */
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

6253 6254
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
6255 6256
	if (PageWriteback(page) || PageDirty(page))
		return 0;
6257
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
6258 6259
}

/*
 * ->invalidatepage: the page is being removed from the page cache
 * (truncate/invalidate).  For a full-page invalidate we must settle any
 * ordered-extent accounting for the range ourselves, since IO on this
 * page will never run; partial invalidates just try a releasepage.
 */
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;


	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					   page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 &cached_state, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		/* Re-take the range lock dropped by the clear above. */
		cached_state = NULL;
		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
				 GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	u64 page_start;
	u64 page_end;

	/* Need this to keep space reservations serialized */
	mutex_lock(&inode->i_mutex);
	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	mutex_unlock(&inode->i_mutex);
	if (!ret)
		ret = btrfs_update_time(vma->vm_file);
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
	 * clear any delalloc bits for the range we are fixing to save.  There
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	/* Zero the tail of a partial page beyond EOF. */
	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
	/* Fault did not complete: give back the page's reservation. */
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out:
	return ret;
}

/*
 * Truncate the inode's data items down to the current i_size, driving the
 * work through (possibly many) transactions and dedicated block
 * reservations.  See the long comment below for why three separate
 * reservations are needed.
 */
static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;
	int err = 0;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return ret;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	/*
	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
	 * 3 things going on here
	 *
	 * 1) We need to reserve space for our orphan item and the space to
	 * delete our orphan item.  Lord knows we don't want to have a dangling
	 * orphan item because we didn't reserve space to remove it.
	 *
	 * 2) We need to reserve space to update our inode.
	 *
	 * 3) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to all be separate.  The fact is we can use a lot
	 * of space doing the truncate, and we have no earthly idea how much
	 * space we will use, so we need the truncate reservation to be
	 * separate so it doesn't end up using space reserved for updating the
	 * inode or removing the orphan item.  We also need to be able to stop
	 * the transaction and start a new one, which means we need to be able
	 * to update the inode several times, and we have no idea of knowing
	 * how many times that will be, so we can't just reserve 1 item for the
	 * entirety of the operation, so that has to be done separately as
	 * well.  Then there is the orphan item, which does indeed need to be
	 * held on to for the whole operation, and we need nobody to touch this
	 * reserved space except the orphan code.
	 *
	 * So that leaves us with
	 *
	 * 1) root->orphan_block_rsv - for the orphan deletion.
	 * 2) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;

	/*
	 * 1 for the truncate slack space
	 * 1 for the orphan item we're going to add
	 * 1 for the orphan item deletion
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 4);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);

	ret = btrfs_orphan_add(trans, inode);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto out;
	}

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		ret = btrfs_block_rsv_refill(root, rsv, min_size);
		if (ret) {
			/*
			 * This can only happen with the original transaction we
			 * started above, every other time we shouldn't have a
			 * transaction started yet.
			 */
			if (ret == -EAGAIN)
				goto end_trans;
			err = ret;
			break;
		}

		if (!trans) {
			/* Just need the 1 for updating the inode */
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = err = PTR_ERR(trans);
				trans = NULL;
				break;
			}
		}

		trans->block_rsv = rsv;

		/* Returns -EAGAIN when it needs the transaction recycled. */
		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN) {
			err = ret;
			break;
		}

		trans->block_rsv = &root->fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}
end_trans:
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		trans->block_rsv = root->orphan_block_rsv;
		ret = btrfs_orphan_del(trans, inode);
		if (ret)
			err = ret;
	} else if (ret && inode->i_nlink > 0) {
		/*
		 * Failed to do the truncate, remove us from the in memory
		 * orphan list.
		 */
		ret = btrfs_orphan_del(NULL, inode);
	}

	if (trans) {
		trans->block_rsv = &root->fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret && !err)
			err = ret;

		nr = trans->blocks_used;
		ret = btrfs_end_transaction_throttle(trans, root);
		btrfs_btree_balance_dirty(root, nr);
	}

out:
	btrfs_free_block_rsv(root, rsv);

	if (ret && !err)
		err = ret;

	return err;
}

/*
 * create a new subvolume directory/inode (helper for the ioctl).
 *
 * Builds the ".." root directory inode of @new_root with mode 0700, writes
 * it back, and drops the local reference.  Returns 0 or the PTR_ERR from
 * btrfs_new_inode().
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root, u64 new_dirid)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
				new_dirid, S_IFDIR | 0700, &index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	inode->i_nlink = 1;
	btrfs_i_size_write(inode, 0);

	err = btrfs_update_inode(trans, new_root, inode);
	BUG_ON(err);

	iput(inode);
	return 0;
}

/*
 * ->alloc_inode: allocate a btrfs_inode from the slab cache and initialize
 * every per-inode field that is not covered by init_once() (the slab
 * constructor only runs inode_init_once on the embedded VFS inode).
 */
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->space_info = NULL;
	ei->generation = 0;
	ei->sequence = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->last_unlink_trans = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;

	ei->ordered_data_close = 0;
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->in_defrag = 0;
	ei->delalloc_meta_reserved = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(&ei->io_tree, &inode->i_data);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}

/* RCU callback: actually free the inode once readers are done with it. */
static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

/*
 * ->destroy_inode: tear down a btrfs inode.  Warns about state that should
 * have been drained already, unhooks the inode from the ordered-operation
 * and orphan lists, reaps any leftover ordered extents, and finally frees
 * the structure via RCU.
 */
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(BTRFS_I(inode)->outstanding_extents);
	WARN_ON(BTRFS_I(inode)->reserved_extents);
	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
	WARN_ON(BTRFS_I(inode)->csum_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
		       (unsigned long long)btrfs_ino(inode));
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->orphan_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			/* One put for the lookup ref, one for the extent. */
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	btrfs_remove_delayed_node(inode);
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}

/*
 * ->drop_inode: force-evict inodes belonging to a dead root (refs == 0),
 * except the free-space cache inodes; everything else follows the generic
 * policy.
 */
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    !btrfs_is_free_space_inode(root, inode))
		return 1;
	else
		return generic_drop_inode(inode);
}

/* Slab constructor: one-time VFS init of the embedded inode. */
static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

/* Destroy all btrfs slab caches; each pointer may be NULL if init failed. */
void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
}

int btrfs_init_cachep(void)
{
6809 6810 6811
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
C
Chris Mason 已提交
6812 6813
	if (!btrfs_inode_cachep)
		goto fail;
6814 6815 6816 6817

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
C
Chris Mason 已提交
6818 6819
	if (!btrfs_trans_handle_cachep)
		goto fail;
6820 6821 6822 6823

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
C
Chris Mason 已提交
6824 6825
	if (!btrfs_transaction_cachep)
		goto fail;
6826 6827 6828 6829

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
C
Chris Mason 已提交
6830 6831
	if (!btrfs_path_cachep)
		goto fail;
6832

6833 6834 6835 6836 6837 6838
	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

C
Chris Mason 已提交
6839 6840 6841 6842 6843 6844 6845 6846 6847 6848
	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}

static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
6849 6850
	u32 blocksize = inode->i_sb->s_blocksize;

C
Chris Mason 已提交
6851
	generic_fillattr(inode, stat);
6852
	stat->dev = BTRFS_I(inode)->root->anon_dev;
6853
	stat->blksize = PAGE_CACHE_SIZE;
6854 6855
	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
C
Chris Mason 已提交
6856 6857 6858
	return 0;
}

6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878
/*
 * If a file is moved, it will inherit the cow and compression flags of the new
 * directory.
 */
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	if (b_dir->flags & BTRFS_INODE_NODATACOW)
		b_inode->flags |= BTRFS_INODE_NODATACOW;
	else
		b_inode->flags &= ~BTRFS_INODE_NODATACOW;

	if (b_dir->flags & BTRFS_INODE_COMPRESS)
		b_inode->flags |= BTRFS_INODE_COMPRESS;
	else
		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}

6879 6880
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
C
Chris Mason 已提交
6881 6882 6883
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
6884
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
C
Chris Mason 已提交
6885 6886 6887
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
6888
	u64 index = 0;
6889
	u64 root_objectid;
C
Chris Mason 已提交
6890
	int ret;
6891
	u64 old_ino = btrfs_ino(old_inode);
C
Chris Mason 已提交
6892

6893
	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6894 6895
		return -EPERM;

6896
	/* we only allow rename subvolume link between subvolumes */
6897
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
6898 6899
		return -EXDEV;

6900 6901
	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
C
Chris Mason 已提交
6902
		return -ENOTEMPTY;
6903

6904 6905 6906
	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
6907 6908 6909 6910 6911
	/*
	 * we're using rename to replace one file with another.
	 * and the replacement file is large.  Start IO on it now so
	 * we don't add too much work to the end of the transaction
	 */
6912
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
6913 6914 6915
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

6916
	/* close the racy window with snapshot create/destroy ioctl */
6917
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
6918
		down_read(&root->fs_info->subvol_sem);
6919 6920 6921 6922 6923 6924 6925 6926 6927
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume their normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
6928 6929 6930 6931
	if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out_notrans;
        }
6932

6933 6934
	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);
6935

6936 6937 6938
	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;
6939

6940
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
6941 6942 6943
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
6944 6945 6946
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
6947 6948
					     old_ino,
					     btrfs_ino(new_dir), index);
6949 6950
		if (ret)
			goto out_fail;
6951 6952 6953 6954 6955 6956 6957 6958 6959
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
6960 6961 6962 6963
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
6964
	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
6965 6966
		btrfs_add_ordered_operation(trans, root, old_inode);

C
Chris Mason 已提交
6967 6968 6969
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;
6970

6971 6972 6973
	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

6974
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
6975 6976 6977 6978 6979
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
6980 6981 6982 6983 6984 6985
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					old_dentry->d_inode,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
6986 6987
	}
	BUG_ON(ret);
C
Chris Mason 已提交
6988 6989 6990

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
6991
		if (unlikely(btrfs_ino(new_inode) ==
6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		BUG_ON(ret);
7006
		if (new_inode->i_nlink == 0) {
7007
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
7008
			BUG_ON(ret);
7009
		}
C
Chris Mason 已提交
7010
	}
7011

7012 7013
	fixup_inode_flags(new_dir, old_inode);

7014 7015
	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
7016
			     new_dentry->d_name.len, 0, index);
7017
	BUG_ON(ret);
C
Chris Mason 已提交
7018

7019
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7020
		struct dentry *parent = new_dentry->d_parent;
7021
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
7022 7023
		btrfs_end_log_trans(root);
	}
C
Chris Mason 已提交
7024
out_fail:
7025
	btrfs_end_transaction_throttle(trans, root);
7026
out_notrans:
7027
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7028
		up_read(&root->fs_info->subvol_sem);
7029

C
Chris Mason 已提交
7030 7031 7032
	return ret;
}

7033 7034 7035 7036
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
Y
Yan, Zheng 已提交
7037
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7038 7039 7040
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
7041
	struct inode *inode;
7042

7043 7044 7045
	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

7046
	spin_lock(&root->fs_info->delalloc_lock);
7047
	while (!list_empty(head)) {
7048 7049
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
7050 7051 7052
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
7053
		spin_unlock(&root->fs_info->delalloc_lock);
7054
		if (inode) {
7055
			filemap_flush(inode->i_mapping);
Y
Yan, Zheng 已提交
7056 7057 7058 7059
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
7060 7061
		}
		cond_resched();
7062
		spin_lock(&root->fs_info->delalloc_lock);
7063
	}
7064
	spin_unlock(&root->fs_info->delalloc_lock);
7065 7066 7067 7068 7069 7070

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
7071
	while (atomic_read(&root->fs_info->nr_async_submits) ||
7072
	      atomic_read(&root->fs_info->async_delalloc_pages)) {
7073
		wait_event(root->fs_info->async_submit_wait,
7074 7075
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7076 7077
	}
	atomic_dec(&root->fs_info->async_submit_draining);
7078 7079 7080
	return 0;
}

C
Chris Mason 已提交
7081 7082 7083 7084 7085 7086 7087
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
7088
	struct inode *inode = NULL;
C
Chris Mason 已提交
7089 7090 7091
	int err;
	int drop_inode = 0;
	u64 objectid;
7092
	u64 index = 0 ;
C
Chris Mason 已提交
7093 7094
	int name_len;
	int datasize;
7095
	unsigned long ptr;
C
Chris Mason 已提交
7096
	struct btrfs_file_extent_item *ei;
7097
	struct extent_buffer *leaf;
7098
	unsigned long nr = 0;
C
Chris Mason 已提交
7099 7100 7101 7102

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;
7103

7104 7105 7106 7107 7108
	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
7109 7110 7111
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
7112

7113 7114 7115 7116
	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

7117
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7118
				dentry->d_name.len, btrfs_ino(dir), objectid,
7119
				S_IFLNK|S_IRWXUGO, &index);
7120 7121
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
C
Chris Mason 已提交
7122
		goto out_unlock;
7123
	}
C
Chris Mason 已提交
7124

7125
	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
J
Josef Bacik 已提交
7126 7127 7128 7129 7130
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

7131
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
C
Chris Mason 已提交
7132 7133 7134 7135
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
7136
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
C
Chris Mason 已提交
7137 7138
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
7139
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
C
Chris Mason 已提交
7140 7141 7142 7143 7144
	}
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
7145 7146 7147 7148 7149
	if (!path) {
		err = -ENOMEM;
		drop_inode = 1;
		goto out_unlock;
	}
7150
	key.objectid = btrfs_ino(inode);
C
Chris Mason 已提交
7151 7152 7153 7154 7155
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
7156 7157
	if (err) {
		drop_inode = 1;
7158
		btrfs_free_path(path);
7159 7160
		goto out_unlock;
	}
7161 7162 7163 7164 7165
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
C
Chris Mason 已提交
7166
				   BTRFS_FILE_EXTENT_INLINE);
7167 7168 7169 7170 7171
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

C
Chris Mason 已提交
7172
	ptr = btrfs_file_extent_inline_start(ei);
7173 7174
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
C
Chris Mason 已提交
7175
	btrfs_free_path(path);
7176

C
Chris Mason 已提交
7177 7178
	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
7179
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7180
	inode_set_bytes(inode, name_len);
7181
	btrfs_i_size_write(inode, name_len - 1);
7182 7183 7184
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;
C
Chris Mason 已提交
7185 7186

out_unlock:
7187
	nr = trans->blocks_used;
7188
	btrfs_end_transaction_throttle(trans, root);
C
Chris Mason 已提交
7189 7190 7191 7192
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
7193
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
7194 7195
	return err;
}
7196

7197 7198 7199 7200
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
7201 7202 7203 7204
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
7205
	u64 i_size;
7206
	int ret = 0;
7207
	bool own_trans = true;
7208

7209 7210
	if (trans)
		own_trans = false;
7211
	while (num_bytes > 0) {
7212 7213 7214 7215 7216 7217
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
7218 7219
		}

7220 7221
		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, (u64)-1, &ins, 1);
7222
		if (ret) {
7223 7224
			if (own_trans)
				btrfs_end_transaction(trans, root);
7225
			break;
7226
		}
7227

7228 7229 7230
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
7231
						  ins.offset, 0, 0, 0,
7232 7233
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
7234 7235
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset -1, 0);
7236

7237 7238
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
7239
		*alloc_hint = ins.objectid + ins.offset;
7240

7241
		inode->i_ctime = CURRENT_TIME;
7242
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
7243
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
7244 7245
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
7246
			if (cur_offset > actual_len)
7247
				i_size = actual_len;
7248
			else
7249 7250 7251
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
7252 7253
		}

7254 7255 7256
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

7257 7258
		if (own_trans)
			btrfs_end_transaction(trans, root);
7259
	}
7260 7261 7262
	return ret;
}

7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

/*
 * Preallocate a file range inside the caller's existing transaction
 * (@trans), instead of starting one per chunk.
 */
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}

7281 7282 7283 7284 7285
/* .set_page_dirty address-space op: delegate to __set_page_dirty_nobuffers(). */
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

7286
static int btrfs_permission(struct inode *inode, int mask)
Y
Yan 已提交
7287
{
7288
	struct btrfs_root *root = BTRFS_I(inode)->root;
7289
	umode_t mode = inode->i_mode;
7290

7291 7292 7293 7294 7295 7296 7297
	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
7298
	return generic_permission(inode, mask);
Y
Yan 已提交
7299
}
C
Chris Mason 已提交
7300

7301
static const struct inode_operations btrfs_dir_inode_operations = {
7302
	.getattr	= btrfs_getattr,
C
Chris Mason 已提交
7303 7304 7305 7306 7307 7308 7309 7310 7311
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
J
Josef Bacik 已提交
7312
	.mknod		= btrfs_mknod,
7313 7314
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
J
Josef Bacik 已提交
7315
	.listxattr	= btrfs_listxattr,
7316
	.removexattr	= btrfs_removexattr,
Y
Yan 已提交
7317
	.permission	= btrfs_permission,
7318
	.get_acl	= btrfs_get_acl,
C
Chris Mason 已提交
7319
};
7320
static const struct inode_operations btrfs_dir_ro_inode_operations = {
C
Chris Mason 已提交
7321
	.lookup		= btrfs_lookup,
Y
Yan 已提交
7322
	.permission	= btrfs_permission,
7323
	.get_acl	= btrfs_get_acl,
C
Chris Mason 已提交
7324
};
7325

7326
static const struct file_operations btrfs_dir_file_operations = {
C
Chris Mason 已提交
7327 7328
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
7329
	.readdir	= btrfs_real_readdir,
7330
	.unlocked_ioctl	= btrfs_ioctl,
C
Chris Mason 已提交
7331
#ifdef CONFIG_COMPAT
7332
	.compat_ioctl	= btrfs_ioctl,
C
Chris Mason 已提交
7333
#endif
S
Sage Weil 已提交
7334
	.release        = btrfs_release_file,
7335
	.fsync		= btrfs_sync_file,
C
Chris Mason 已提交
7336 7337
};

7338
static struct extent_io_ops btrfs_extent_io_ops = {
7339
	.fill_delalloc = run_delalloc_range,
7340
	.submit_bio_hook = btrfs_submit_bio_hook,
7341
	.merge_bio_hook = btrfs_merge_bio_hook,
7342
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
7343
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
7344
	.writepage_start_hook = btrfs_writepage_start_hook,
7345 7346
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
7347 7348
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
7349 7350
};

7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
7363
static const struct address_space_operations btrfs_aops = {
C
Chris Mason 已提交
7364 7365
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
7366
	.writepages	= btrfs_writepages,
7367
	.readpages	= btrfs_readpages,
7368
	.direct_IO	= btrfs_direct_IO,
7369 7370
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
7371
	.set_page_dirty	= btrfs_set_page_dirty,
7372
	.error_remove_page = generic_error_remove_page,
C
Chris Mason 已提交
7373 7374
};

7375
static const struct address_space_operations btrfs_symlink_aops = {
C
Chris Mason 已提交
7376 7377
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
C
Chris Mason 已提交
7378 7379
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
C
Chris Mason 已提交
7380 7381
};

7382
static const struct inode_operations btrfs_file_inode_operations = {
C
Chris Mason 已提交
7383 7384
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
7385 7386
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
J
Josef Bacik 已提交
7387
	.listxattr      = btrfs_listxattr,
7388
	.removexattr	= btrfs_removexattr,
Y
Yan 已提交
7389
	.permission	= btrfs_permission,
Y
Yehuda Sadeh 已提交
7390
	.fiemap		= btrfs_fiemap,
7391
	.get_acl	= btrfs_get_acl,
C
Chris Mason 已提交
7392
};
7393
static const struct inode_operations btrfs_special_inode_operations = {
J
Josef Bacik 已提交
7394 7395
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
Y
Yan 已提交
7396
	.permission	= btrfs_permission,
7397 7398
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
J
Josef Bacik 已提交
7399
	.listxattr	= btrfs_listxattr,
7400
	.removexattr	= btrfs_removexattr,
7401
	.get_acl	= btrfs_get_acl,
J
Josef Bacik 已提交
7402
};
7403
static const struct inode_operations btrfs_symlink_inode_operations = {
C
Chris Mason 已提交
7404 7405 7406
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
7407
	.getattr	= btrfs_getattr,
7408
	.setattr	= btrfs_setattr,
Y
Yan 已提交
7409
	.permission	= btrfs_permission,
J
Jim Owens 已提交
7410 7411 7412 7413
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
7414
	.get_acl	= btrfs_get_acl,
C
Chris Mason 已提交
7415
};
7416

7417
const struct dentry_operations btrfs_dentry_operations = {
7418
	.d_delete	= btrfs_dentry_delete,
7419
	.d_release	= btrfs_dentry_release,
7420
};