/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode,  struct inode *dir)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int compress_type = BTRFS_COMPRESS_NONE;

	if (compressed_size && compressed_pages) {
		compress_type = root->fs_info->compress_type;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	return 0;
fail:
	btrfs_free_path(path);
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

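/*
 * Tracks one range handed to the async compression code: either a set of
 * compressed pages waiting to be written, or (when pages == NULL) a range
 * that fell back to uncompressed IO.
 */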
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize,  num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		btrfs_set_trans_block_group(trans, inode);
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		btrfs_end_transaction(trans, root);

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map(GFP_NOFS);
		BUG_ON(!em);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}

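/*
 * Pick an allocation hint for this range: use the on-disk location of the
 * extent map covering it, or of the first mapped extent in the file if that
 * location is not a real block number.
 */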
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(root == root->fs_info->tree_root);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize,  num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		BUG_ON(!em);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue call back to started compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

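/*
 * Returns 1 if any checksums exist for the given byte range, 0 if none do.
 * The nocow path uses this to force COW when csums are present, so the csums
 * for a given extent are either fully valid or completely absent.
 */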
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * called when nocow writeback is requested.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock = false;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	if (root == root->fs_info->tree_root) {
		nolock = true;
		trans = btrfs_join_transaction_nolock(root, 1);
	} else {
		trans = btrfs_join_transaction(root, 1);
	}
	BUG_ON(IS_ERR(trans));

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			BUG_ON(!em);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			BUG_ON(ret);
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	if (nolock) {
		ret = btrfs_end_transaction_nolock(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress))
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

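/*
 * extent_io.c split_extent_hook, called when a delalloc extent state is
 * split in two so the count of outstanding extents stays accurate.
 */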
static int btrfs_split_extent_hook(struct inode *inode,
				   struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return 0;

	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
	return 0;
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static int btrfs_merge_extent_hook(struct inode *inode,
				   struct extent_state *new,
				   struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return 0;

	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
	return 0;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode,
			      struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		int do_list = (root->root_key.objectid !=
			       BTRFS_ROOT_TREE_OBJECTID);

		if (*bits & EXTENT_FIRST_DELALLOC)
			*bits &= ~EXTENT_FIRST_DELALLOC;
		else
			atomic_inc(&BTRFS_I(inode)->outstanding_extents);

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode,
				struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		int do_list = (root->root_key.objectid !=
			       BTRFS_ROOT_TREE_OBJECTID);

		if (*bits & EXTENT_FIRST_DELALLOC)
			*bits &= ~EXTENT_FIRST_DELALLOC;
		else if (!(*bits & EXTENT_DO_ACCOUNTING))
			atomic_dec(&BTRFS_I(inode)->outstanding_extents);

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return ret;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached to the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached to the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (root == root->fs_info->tree_root)
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
	else
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & REQ_WRITE)) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

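/*
 * Mark a byte range in the io_tree as delalloc so it is picked up by the
 * delayed allocation code at writeback time.
 */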
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state, GFP_NOFS);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	BUG();
	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}

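/*
 * Insert a file extent item for an extent that has already been written to
 * disk at a reserved location, and record the backref for the new extent.
 */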
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
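	/*
	 * ins is reused as the key of the data extent in the extent
	 * allocation tree: (disk_bytenr, BTRFS_EXTENT_ITEM_KEY,
	 * disk_num_bytes).
	 */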
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					inode->i_ino, file_pos, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);

	return 0;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	int compress_type = 0;
	int ret;
	bool nolock = false;

	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					     end - start + 1);
	if (!ret)
		return 0;
	BUG_ON(!ordered_extent);

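	/*
	 * Inodes that belong to the tree root (such as the free space cache
	 * inodes) are written back during transaction commit, so they take
	 * the nolock transaction join path below.
	 */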
	nolock = (root == root->fs_info->tree_root);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list));
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			if (nolock)
				trans = btrfs_join_transaction_nolock(root, 1);
			else
				trans = btrfs_join_transaction(root, 1);
			BUG_ON(IS_ERR(trans));
			btrfs_set_trans_block_group(trans, inode);
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode(trans, root, inode);
			BUG_ON(ret);
		}
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state, GFP_NOFS);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root, 1);
	else
		trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);

	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);
out:
	if (nolock) {
		if (trans)
			btrfs_end_transaction_nolock(trans, root);
	} else {
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
		if (trans)
			btrfs_end_transaction(trans, root);
	}

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
}

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&failrec->bio_flags,
						 em->compress_type);
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				 (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
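	/*
	 * Each retry moves on to the next mirror; once last_mirror exceeds
	 * the number of copies we give up and return -EIO below.
	 */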
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & REQ_WRITE)
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						      failrec->last_mirror,
						      failrec->bio_flags, 0);
	return 0;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	if (printk_ratelimit()) {
		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
		       "private %llu\n", page->mapping->host->i_ino,
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	}
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	if (private == 0)
		return 0;
	return -EIO;
}

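/*
 * Dropping the last reference with iput() can end up doing real work
 * (eviction, orphan truncation) that is not safe from every caller's
 * context, so the final iput is queued here and run later from
 * btrfs_run_delayed_iputs().
 */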
struct delayed_iput {
	struct list_head list;
	struct inode *inode;
};

void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}

void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}

/*
 * calculate the extra metadata reservation needed when snapshotting a
 * subvolume that contains orphan files.
 */
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending,
				u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;

	root = pending->root;
	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	block_rsv = root->orphan_block_rsv;

	/* orphan block reservation for the snapshot */
	num_bytes = block_rsv->size;

	/*
	 * after the snapshot is created, COWing tree blocks may use more
	 * space than it frees. So we should make sure there is enough
	 * reserved space.
	 */
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes += block_rsv->size -
			     (block_rsv->reserved + block_rsv->freed[index]);
	}

	*bytes_to_reserve += num_bytes;
}

void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *snap = pending->snap;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;
	int ret;

	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	/* refill source subvolume's orphan block reservation */
	block_rsv = root->orphan_block_rsv;
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes = block_rsv->size -
			    (block_rsv->reserved + block_rsv->freed[index]);
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      root->orphan_block_rsv,
					      num_bytes);
		BUG_ON(ret);
	}

	/* setup orphan block reservation for the snapshot */
	block_rsv = btrfs_alloc_block_rsv(snap);
	BUG_ON(!block_rsv);

	btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
	snap->orphan_block_rsv = block_rsv;

	num_bytes = root->orphan_block_rsv->size;
	ret = btrfs_block_rsv_migrate(&pending->block_rsv,
				      block_rsv, num_bytes);
	BUG_ON(ret);

#if 0
	/* insert orphan item for the snapshot */
	WARN_ON(!root->orphan_item_inserted);
	ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
				       snap->root_key.objectid);
	BUG_ON(ret);
	snap->orphan_item_inserted = 1;
#endif
}

enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};

/*
 * This is called at transaction commit time. If there are no orphan
 * files in the subvolume, it removes the orphan item and frees the block_rsv
 * structure.
 */
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	int ret;

	if (!list_empty(&root->orphan_list) ||
	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	if (root->orphan_item_inserted &&
	    btrfs_root_refs(&root->root_item) > 0) {
		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
					    root->root_key.objectid);
		BUG_ON(ret);
		root->orphan_item_inserted = 0;
	}

	if (root->orphan_block_rsv) {
		WARN_ON(root->orphan_block_rsv->size > 0);
		btrfs_free_block_rsv(root, root->orphan_block_rsv);
		root->orphan_block_rsv = NULL;
	}
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 *
 * NOTE: caller of this function should reserve 5 units of metadata for
 *	 this function.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;

	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(root);
		BUG_ON(!block_rsv);
	}

	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		btrfs_free_block_rsv(root, block_rsv);
		block_rsv = NULL;
	}

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
#endif
		insert = 1;
	} else {
		WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
	}

	if (!BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 1;
		reserve = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (block_rsv)
		btrfs_add_durable_block_rsv(root->fs_info, block_rsv);

	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		BUG_ON(ret);
	}

	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
		BUG_ON(ret);
	}

	/* insert an orphan item to track subvolume contains orphan files */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
					       root->root_key.objectid);
		BUG_ON(ret);
	}
	return 0;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int delete_item = 0;
	int release_rsv = 0;
	int ret = 0;

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_del_init(&BTRFS_I(inode)->i_orphan);
		delete_item = 1;
	}

	if (BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 0;
		release_rsv = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (trans && delete_item) {
		ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
		BUG_ON(ret);
	}

	if (release_rsv)
		btrfs_orphan_release_metadata(inode);

	return 0;
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
void btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
		return;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			printk(KERN_ERR "Error searching slot for orphan: %d"
			       "\n", ret);
			break;
		}

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
		BUG_ON(IS_ERR(inode));

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->orphan_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->orphan_lock);

		/*
		 * if this is a bad inode, it means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 0);
			BUG_ON(IS_ERR(trans));
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, lets do that */
		if (inode->i_nlink) {
			nr_truncate++;
			btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	btrfs_free_path(path);

	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
		btrfs_block_rsv_release(root, root->orphan_block_rsv,
					(u64)-1);

	if (root->orphan_block_rsv || root->orphan_item_inserted) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		btrfs_end_transaction(trans, root);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
}

/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	int scanned = 0;

	slot++;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
			return 1;

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	return 1;
}

/*
 * read an inode from the btree into the in-memory inode
 */
static void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	int maybe_acls;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);

	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
	if (!maybe_acls)
		cache_no_acl(inode);

	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
						alloc_group_block, 0);
	btrfs_free_path(path);
	inode_item = NULL;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}


/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				    name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  inode->i_ino,
				  dir->i_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       inode->i_ino, dir->i_ino);
		goto err;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir->i_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
	btrfs_drop_nlink(inode);
	ret = btrfs_update_inode(trans, root, inode);
out:
	return ret;
}

/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
			     struct btrfs_path *path)
{
	struct extent_buffer *eb;
	int level;
	u64 refs = 1;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		int ret;

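		/*
		 * A block that can be shared and has more than one reference
		 * is also owned by a snapshot, so freeing this path may not
		 * actually release any space.
		 */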
		if (!path->nodes[level])
			break;
		eb = path->nodes[level];
		if (!btrfs_block_can_be_shared(root, eb))
			continue;
		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
					       &refs, NULL);
		if (refs > 1)
			return 1;
	}
	return 0;
}

/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs: they do not always free space,
 * so in the enospc case we must make sure they will free space before
 * allowing them to use the global metadata reservation.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
						       struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *inode = dentry->d_inode;
	u64 index;
	int check_link = 1;
	int err = -ENOSPC;
	int ret;

	trans = btrfs_start_transaction(root, 10);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return ERR_PTR(-ENOSPC);

	/* check if there is someone else holding a reference */
	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
		return ERR_PTR(-ENOSPC);

	if (atomic_read(&inode->i_count) > 2)
		return ERR_PTR(-ENOSPC);

	if (xchg(&root->fs_info->enospc_unlink, 1))
		return ERR_PTR(-ENOSPC);

	path = btrfs_alloc_path();
	if (!path) {
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(-ENOMEM);
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		root->fs_info->enospc_unlink = 0;
		return trans;
	}

	path->skip_locking = 1;
	path->search_commit_root = 1;

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(dir)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(root, path);

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(inode)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(root, path);

	if (ret == 0 && S_ISREG(inode->i_mode)) {
		ret = btrfs_lookup_file_extent(trans, root, path,
					       inode->i_ino, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		BUG_ON(ret == 0);
		if (check_path_shared(root, path))
			goto out;
		btrfs_release_path(root, path);
	}

	if (!check_link) {
		err = 0;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	if (di) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		err = 0;
		goto out;
	}
	btrfs_release_path(root, path);

	ref = btrfs_lookup_inode_ref(trans, root, path,
				dentry->d_name.name, dentry->d_name.len,
				inode->i_ino, dir->i_ino, 0);
	if (IS_ERR(ref)) {
		err = PTR_ERR(ref);
		goto out;
	}
	BUG_ON(!ref);
	if (check_path_shared(root, path))
		goto out;
	index = btrfs_inode_ref_index(path->nodes[0], ref);
	btrfs_release_path(root, path);

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	BUG_ON(ret == -ENOENT);
	if (check_path_shared(root, path))
		goto out;

	err = 0;
out:
	btrfs_free_path(path);
	if (err) {
		btrfs_end_transaction(trans, root);
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(err);
	}

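	/*
	 * Every item this unlink will touch was found to be unshared above,
	 * so the unlink is expected to free space and is allowed to use the
	 * global metadata reservation.
	 */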
	trans->block_rsv = &root->fs_info->global_block_rsv;
	return trans;
}

static void __unlink_end_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
		BUG_ON(!root->fs_info->enospc_unlink);
		root->fs_info->enospc_unlink = 0;
	}
	btrfs_end_transaction_throttle(trans, root);
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, inode);
		BUG_ON(ret);
	}

	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}

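/*
 * Remove a subvolume's entry from its parent directory: the dir item, the
 * matching dir index item, and the root ref that ties the subvolume to the
 * directory.
 */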
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	BUG_ON(!di || IS_ERR(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir->i_ino, &index, name, name_len);
	if (ret < 0) {
		BUG_ON(ret != -ENOENT);
		di = btrfs_search_dir_index_item(root, path, dir->i_ino,
						 name, name_len);
		BUG_ON(!di || IS_ERR(di));

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(root, path);
		index = key.offset;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	BUG_ON(!di || IS_ERR(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		return -ENOTEMPTY;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto out;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
out:
	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	return err;
}

#if 0
/*
 * when truncating bytes in a file, it is possible to avoid reading
 * the leaves that contain only checksum items.  This can be the
 * majority of the IO required to delete a large file, but it must
 * be done carefully.
 *
 * The keys in the level just above the leaves are checked to make sure
 * the lowest key in a given leaf is a csum key, and starts at an offset
 * after the new  size.
 *
 * Then the key for the next leaf is checked to make sure it also has
 * a checksum item for the same file.  If it does, we know our target leaf
 * contains only checksum items, and it can be safely freed without reading
 * it.
 *
 * This is just an optimization targeted at large files.  It may do
 * nothing.  It will return 0 unless things went badly.
 */
static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct inode *inode, u64 new_size)
{
	struct btrfs_key key;
	int ret;
	int nritems;
	struct btrfs_key found_key;
	struct btrfs_key other_key;
	struct btrfs_leaf_ref *ref;
	u64 leaf_gen;
	u64 leaf_start;

	path->lowest_level = 1;
	key.objectid = inode->i_ino;
	key.type = BTRFS_CSUM_ITEM_KEY;
	key.offset = new_size;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (path->nodes[1] == NULL) {
		ret = 0;
		goto out;
	}
	ret = 0;
	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
	nritems = btrfs_header_nritems(path->nodes[1]);

	if (!nritems)
		goto out;

	if (path->slots[1] >= nritems)
		goto next_node;

	/* did we find a key greater than anything we want to delete? */
	if (found_key.objectid > inode->i_ino ||
	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
		goto out;

	/* we check the next key in the node to make sure the leave contains
	 * only checksum items.  This comparison doesn't work if our
	 * leaf is the last one in the node
	 */
	if (path->slots[1] + 1 >= nritems) {
next_node:
		/* search forward from the last key in the node, this
		 * will bring us into the next node in the tree
		 */
		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);

		/* unlikely, but we inc below, so check to be safe */
		if (found_key.offset == (u64)-1)
			goto out;

		/* search_forward needs a path with locks held, do the
		 * search again for the original key.  It is possible
		 * this will race with a balance and return a path that
		 * we could modify, but this drop is just an optimization
		 * and is allowed to miss some leaves.
		 */
		btrfs_release_path(root, path);
		found_key.offset++;

		/* setup a max key for search_forward */
		other_key.offset = (u64)-1;
		other_key.type = key.type;
		other_key.objectid = key.objectid;

		path->keep_locks = 1;
		ret = btrfs_search_forward(root, &found_key, &other_key,
					   path, 0, 0);
		path->keep_locks = 0;
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		key.offset = found_key.offset;
		btrfs_release_path(root, path);
		cond_resched();
		goto again;
	}

	/* we know there's one more slot after us in the tree,
	 * read that key so we can verify it is also a checksum item
	 */
	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

	if (found_key.objectid < inode->i_ino)
		goto next_key;

	if (found_key.type != key.type || found_key.offset < new_size)
		goto next_key;

	/*
	 * if the key for the next leaf isn't a csum key from this objectid,
	 * we can't be sure there aren't good items inside this leaf.
	 * Bail out
	 */
	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
		goto out;

	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
	/*
	 * it is safe to delete this leaf, it contains only
	 * csum items from this inode at an offset >= new_size
	 */
	ret = btrfs_del_leaf(trans, root, path, leaf_start);
	BUG_ON(ret);

	if (root->ref_cows && leaf_gen < trans->transid) {
		ref = btrfs_alloc_leaf_ref(root, 0);
		if (ref) {
			ref->root_gen = root->root_key.offset;
			ref->bytenr = leaf_start;
			ref->owner = 0;
			ref->generation = leaf_gen;
			ref->nritems = 0;

			btrfs_sort_leaf_ref(ref);

			ret = btrfs_add_leaf_ref(root, ref, 0);
			WARN_ON(ret);
			btrfs_free_leaf_ref(root, ref);
		} else {
			WARN_ON(1);
		}
	}
next_key:
	btrfs_release_path(root, path);

	if (other_key.objectid == inode->i_ino &&
	    other_key.type == key.type && other_key.offset > key.offset) {
		key.offset = other_key.offset;
		cond_resched();
		goto again;
	}
	ret = 0;
out:
	/* fixup any changes we've made to the path */
	path->lowest_level = 0;
	path->keep_locks = 0;
	btrfs_release_path(root, path);
	return ret;
}

#endif

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 mask = root->sectorsize - 1;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int encoding;
	int ret;
	int err = 0;

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	if (root->ref_cows || root == root->fs_info->tree_root)
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->reada = -1;

	key.objectid = inode->i_ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

search_again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);
		encoding = 0;

		if (found_key.objectid != inode->i_ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			encoding = btrfs_file_extent_compression(leaf, fi);
			encoding |= btrfs_file_extent_encryption(leaf, fi);
			encoding |= btrfs_file_extent_other_encoding(leaf, fi);

			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item && !encoding) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = new_size -
					found_key.offset + root->sectorsize - 1;
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				ret = btrfs_truncate_item(trans, root, path,
							  size, 1);
				BUG_ON(ret);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
C
Chris Mason 已提交
3383
		if (del_item) {
3384 3385 3386 3387 3388 3389 3390 3391 3392 3393
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
C
Chris Mason 已提交
3394
				BUG();
3395
			}
C
Chris Mason 已提交
3396 3397 3398
		} else {
			break;
		}
3399 3400
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
3401
			btrfs_set_path_blocking(path);
C
Chris Mason 已提交
3402
			ret = btrfs_free_extent(trans, root, extent_start,
3403 3404 3405
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						inode->i_ino, extent_offset);
C
Chris Mason 已提交
3406 3407
			BUG_ON(ret);
		}
3408

3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424
		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			if (root->ref_cows) {
				err = -EAGAIN;
				goto out;
			}
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				BUG_ON(ret);
				pending_del_nr = 0;
			}
3425 3426
			btrfs_release_path(root, path);
			goto search_again;
3427 3428
		} else {
			path->slots[0]--;
3429
		}
C
Chris Mason 已提交
3430
	}
3431
out:
3432 3433 3434
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
3435
		BUG_ON(ret);
3436
	}
C
Chris Mason 已提交
3437
	btrfs_free_path(path);
3438
	return err;
C
Chris Mason 已提交
3439 3440 3441 3442 3443 3444 3445 3446 3447
}

/*
 * taken from block_truncate_page, but does cow as it zeros out
 * any bytes left in the last page in the file.
 */
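/*
 * Example (added for clarity, not in the original): with 4K pages,
 * truncating a file to 7000 bytes leaves bytes 7000..8191 of its last
 * page stale.  This helper reads that page, zeroes the tail in memory and
 * marks the range delalloc, so the zeros reach disk through the normal
 * COW write path instead of rewriting the old extent in place.
 */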
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	int ret = 0;
	u64 page_start;
	u64 page_end;

	if ((offset & (blocksize - 1)) == 0)
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

	ret = -ENOMEM;
again:
	page = grab_cache_page(mapping, index);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	ret = 0;
	if (offset != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

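/*
 * Note (added for clarity, not in the original): btrfs_cont_expand() is
 * called when i_size grows past the old end of file.  It walks the range
 * between the old, block-aligned i_size and the new size and, for every
 * part that is not already a preallocated extent, inserts an explicit
 * hole extent (disk_bytenr == 0) so the region reads back as zeros.
 */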
int btrfs_cont_expand(struct inode *inode, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 mask = root->sectorsize - 1;
	u64 hole_start = (inode->i_size + mask) & ~mask;
	u64 block_end = (size + mask) & ~mask;
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				block_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			u64 hint_byte = 0;
			hole_size = last_byte - cur_offset;

			trans = btrfs_start_transaction(root, 2);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				break;
			}
			btrfs_set_trans_block_group(trans, inode);

			err = btrfs_drop_extents(trans, inode, cur_offset,
						 cur_offset + hole_size,
						 &hint_byte, 1);
			BUG_ON(err);

			err = btrfs_insert_file_extent(trans, root,
					inode->i_ino, cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			BUG_ON(err);

			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);

			btrfs_end_transaction(trans, root);
		}
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}

	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}

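/*
 * Note (added for clarity, not in the original): ATTR_SIZE changes from
 * btrfs_setattr() land here.  The inode is put on the orphan list first,
 * growing files go through btrfs_cont_expand() to fill the new hole, and
 * shrinking files go through vmtruncate() and the truncate path so a
 * crash in the middle can be cleaned up on the next mount.
 */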
static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	int ret;

	if (attr->ia_size == inode->i_size)
		return 0;

	if (attr->ia_size > inode->i_size) {
		unsigned long limit;
		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
		if (attr->ia_size > inode->i_sb->s_maxbytes)
			return -EFBIG;
		if (limit != RLIM_INFINITY && attr->ia_size > limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
	}

	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, inode);

	ret = btrfs_orphan_add(trans, inode);
	BUG_ON(ret);

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	if (attr->ia_size > inode->i_size) {
		ret = btrfs_cont_expand(inode, attr->ia_size);
		if (ret) {
			btrfs_truncate(inode);
			return ret;
		}

		i_size_write(inode, attr->ia_size);
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

		trans = btrfs_start_transaction(root, 0);
		BUG_ON(IS_ERR(trans));
		btrfs_set_trans_block_group(trans, inode);
		trans->block_rsv = root->orphan_block_rsv;
		BUG_ON(!trans->block_rsv);

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
		if (inode->i_nlink > 0) {
			ret = btrfs_orphan_del(trans, inode);
			BUG_ON(ret);
		}
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);
		return 0;
	}

	/*
	 * We're truncating a file that used to have good data down to
	 * zero. Make sure it gets into the ordered flush list so that
	 * any new writes get down to disk quickly.
	 */
	if (attr->ia_size == 0)
		BTRFS_I(inode)->ordered_data_close = 1;

	/* we don't support swapfiles, so vmtruncate shouldn't fail */
	ret = vmtruncate(inode, attr->ia_size);
	BUG_ON(ret);

	return 0;
}

static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setattr_size(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);

		if (attr->ia_valid & ATTR_MODE)
			err = btrfs_acl_chmod(inode);
	}

	return err;
}

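/*
 * Note (added for clarity, not in the original): the final iput() of an
 * unlinked inode ends up here.  The items belonging to the inode are
 * dropped in bounded transactions by btrfs_truncate_inode_items(),
 * retrying while it returns -EAGAIN, and the orphan item is removed once
 * the truncation has finished.
 */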
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);
	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
			       root == root->fs_info->tree_root))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	if (root->fs_info->log_root_recovering) {
		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
		goto no_delete;
	}

	btrfs_i_size_write(inode, 0);

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		BUG_ON(IS_ERR(trans));
		btrfs_set_trans_block_group(trans, inode);
		trans->block_rsv = root->orphan_block_rsv;

		ret = btrfs_block_rsv_check(trans, root,
					    root->orphan_block_rsv, 0, 5);
		if (ret) {
			BUG_ON(ret != -EAGAIN);
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
			continue;
		}

		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -EAGAIN)
			break;

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);

	}

	if (ret == 0) {
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
no_delete:
	end_writeback(inode);
	return;
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
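/*
 * Illustrative note (not in the original): btrfs_lookup_dentry() below
 * uses this and then checks location->type, since the name may resolve
 * either to a plain BTRFS_INODE_ITEM_KEY or to a BTRFS_ROOT_ITEM_KEY
 * when the entry is the root of another subvolume.
 */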
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
				    namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);

	if (!di || IS_ERR(di))
		goto out_err;

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
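/*
 * Sketch of what happens here (added for clarity, not in the original):
 * the tree of tree roots is searched for the BTRFS_ROOT_REF item that
 * links the parent subvolume to the child; the name recorded in that ref
 * must match the dentry, and only then is the child's fs root read and
 * returned through *sub_root together with its root directory key.
 */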
static int fixup_tree_root_location(struct btrfs_root *root,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
				  BTRFS_I(dir)->root->root_key.objectid,
				  location->objectid);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(root->fs_info->tree_root, path);

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	if (btrfs_root_refs(&new_root->root_item) == 0) {
		err = -ENOENT;
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}

static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;

	if (inode_unhashed(inode))
		return;

	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (inode->i_ino < entry->vfs_inode.i_ino)
			p = &parent->rb_left;
		else if (inode->i_ino > entry->vfs_inode.i_ino)
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	/*
	 * Free space cache has inodes in the tree root, but the tree root has a
	 * root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

int btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < entry->vfs_inode.i_ino)
			node = node->rb_left;
		else if (objectid > entry->vfs_inode.i_ino)
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= entry->vfs_inode.i_ino) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = entry->vfs_inode.i_ino + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return 0;
}

static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	btrfs_set_inode_space_info(args->root, inode);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
	return args->ino == inode->i_ino &&
		args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, objectid, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}

/* Get an inode object given its location and corresponding root.
 * Returns in *new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);

		inode_tree_add(inode);
		unlock_new_inode(inode);
		if (new)
			*new = 1;
	}

	return inode;
}

static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	BTRFS_I(inode)->dummy_inode = 1;

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
C
Chris Mason 已提交
4109
{
C
Chris Mason 已提交
4110
	struct inode *inode;
4111
	struct btrfs_root *root = BTRFS_I(dir)->root;
C
Chris Mason 已提交
4112 4113
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
4114
	int index;
4115
	int ret;
C
Chris Mason 已提交
4116 4117 4118

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);
4119

C
Chris Mason 已提交
4120
	ret = btrfs_inode_by_name(dir, dentry, &location);
4121

C
Chris Mason 已提交
4122 4123
	if (ret < 0)
		return ERR_PTR(ret);
4124

4125 4126 4127 4128
	if (location.objectid == 0)
		return NULL;

	if (location.type == BTRFS_INODE_ITEM_KEY) {
4129
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4130 4131 4132 4133 4134
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

4135
	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4136 4137 4138 4139 4140 4141 4142 4143
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
4144
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
C
Chris Mason 已提交
4145
	}
4146 4147
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

4148
	if (!IS_ERR(inode) && root != sub_root) {
4149 4150 4151 4152 4153 4154
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
			btrfs_orphan_cleanup(sub_root);
		up_read(&root->fs_info->cleanup_work_sem);
	}

4155 4156 4157
	return inode;
}

N
Nick Piggin 已提交
4158
static int btrfs_dentry_delete(const struct dentry *dentry)
4159 4160 4161
{
	struct btrfs_root *root;

4162 4163
	if (!dentry->d_inode && !IS_ROOT(dentry))
		dentry = dentry->d_parent;
4164

4165 4166 4167 4168 4169
	if (dentry->d_inode) {
		root = BTRFS_I(dentry->d_inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;
	}
4170 4171 4172
	return 0;
}

4173 4174 4175 4176 4177 4178 4179 4180
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode;

	inode = btrfs_lookup_dentry(dir, dentry);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
4181

C
Chris Mason 已提交
4182 4183 4184 4185 4186 4187 4188
	return d_splice_alias(inode, dentry);
}

static unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};

4189 4190
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
C
Chris Mason 已提交
4191
{
4192
	struct inode *inode = filp->f_dentry->d_inode;
C
Chris Mason 已提交
4193 4194 4195 4196
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
4197
	struct btrfs_key found_key;
C
Chris Mason 已提交
4198 4199 4200
	struct btrfs_path *path;
	int ret;
	u32 nritems;
4201
	struct extent_buffer *leaf;
C
Chris Mason 已提交
4202 4203 4204 4205 4206 4207 4208 4209
	int slot;
	int advance;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
4210 4211 4212
	char tmp_name[32];
	char *name_ptr;
	int name_len;
C
Chris Mason 已提交
4213 4214 4215 4216

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;
4217

4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228
	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       1, inode->i_ino,
			       DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
4229
		u64 pino = parent_ino(filp->f_path.dentry);
4230
		over = filldir(dirent, "..", 2,
4231
			       2, pino, DT_DIR);
4232
		if (over)
4233
			return 0;
4234 4235
		filp->f_pos = 2;
	}
4236 4237 4238
	path = btrfs_alloc_path();
	path->reada = 2;

C
Chris Mason 已提交
4239 4240
	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
4241
	key.objectid = inode->i_ino;
4242

C
Chris Mason 已提交
4243 4244 4245 4246
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;
	advance = 0;
4247 4248

	while (1) {
4249 4250
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
C
Chris Mason 已提交
4251 4252
		slot = path->slots[0];
		if (advance || slot >= nritems) {
4253
			if (slot >= nritems - 1) {
C
Chris Mason 已提交
4254 4255 4256
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
4257 4258
				leaf = path->nodes[0];
				nritems = btrfs_header_nritems(leaf);
C
Chris Mason 已提交
4259 4260 4261 4262 4263 4264
				slot = path->slots[0];
			} else {
				slot++;
				path->slots[0]++;
			}
		}
4265

C
Chris Mason 已提交
4266
		advance = 1;
4267 4268 4269 4270
		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
C
Chris Mason 已提交
4271
			break;
4272
		if (btrfs_key_type(&found_key) != key_type)
C
Chris Mason 已提交
4273
			break;
4274
		if (found_key.offset < filp->f_pos)
C
Chris Mason 已提交
4275
			continue;
4276 4277

		filp->f_pos = found_key.offset;
4278

C
Chris Mason 已提交
4279 4280
		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
4281
		di_total = btrfs_item_size(leaf, item);
4282 4283

		while (di_cur < di_total) {
4284 4285 4286
			struct btrfs_key location;

			name_len = btrfs_dir_name_len(leaf, di);
4287
			if (name_len <= sizeof(tmp_name)) {
4288 4289 4290
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
4291 4292 4293 4294
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
4295 4296 4297 4298 4299 4300
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4301 4302 4303 4304 4305 4306 4307 4308 4309

			/* is this a reference to our own snapshot? If so
			 * skip it
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
4310
			over = filldir(dirent, name_ptr, name_len,
4311
				       found_key.offset, location.objectid,
C
Chris Mason 已提交
4312
				       d_type);
4313

4314
skip:
4315 4316 4317
			if (name_ptr != tmp_name)
				kfree(name_ptr);

C
Chris Mason 已提交
4318 4319
			if (over)
				goto nopos;
J
Josef Bacik 已提交
4320
			di_len = btrfs_dir_name_len(leaf, di) +
4321
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
C
Chris Mason 已提交
4322 4323 4324 4325
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
	}
4326 4327

	/* Reached end of directory/root. Bump pos past the last item. */
4328
	if (key_type == BTRFS_DIR_INDEX_KEY)
4329 4330 4331 4332 4333
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
4334 4335
	else
		filp->f_pos++;
C
Chris Mason 已提交
4336 4337 4338 4339 4340 4341 4342
nopos:
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}

4343
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
C
Chris Mason 已提交
4344 4345 4346 4347
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;
4348
	bool nolock = false;
C
Chris Mason 已提交
4349

4350
	if (BTRFS_I(inode)->dummy_inode)
4351 4352
		return 0;

4353 4354 4355
	smp_mb();
	nolock = (root->fs_info->closing && root == root->fs_info->tree_root);

4356
	if (wbc->sync_mode == WB_SYNC_ALL) {
4357 4358 4359 4360
		if (nolock)
			trans = btrfs_join_transaction_nolock(root, 1);
		else
			trans = btrfs_join_transaction(root, 1);
4361 4362
		if (IS_ERR(trans))
			return PTR_ERR(trans);
C
Chris Mason 已提交
4363
		btrfs_set_trans_block_group(trans, inode);
4364 4365 4366 4367
		if (nolock)
			ret = btrfs_end_transaction_nolock(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
C
Chris Mason 已提交
4368 4369 4370 4371 4372
	}
	return ret;
}

/*
4373
 * This is somewhat expensive, updating the tree every time the
C
Chris Mason 已提交
4374 4375 4376 4377 4378 4379 4380 4381
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
void btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
4382 4383 4384 4385
	int ret;

	if (BTRFS_I(inode)->dummy_inode)
		return;
C
Chris Mason 已提交
4386

4387
	trans = btrfs_join_transaction(root, 1);
4388
	BUG_ON(IS_ERR(trans));
C
Chris Mason 已提交
4389
	btrfs_set_trans_block_group(trans, inode);
4390 4391

	ret = btrfs_update_inode(trans, root, inode);
4392 4393 4394 4395
	if (ret && ret == -ENOSPC) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
4396 4397 4398 4399 4400 4401 4402 4403
		if (IS_ERR(trans)) {
			if (printk_ratelimit()) {
				printk(KERN_ERR "btrfs: fail to "
				       "dirty  inode %lu error %ld\n",
				       inode->i_ino, PTR_ERR(trans));
			}
			return;
		}
4404
		btrfs_set_trans_block_group(trans, inode);
4405

4406 4407
		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
4408 4409 4410 4411 4412
			if (printk_ratelimit()) {
				printk(KERN_ERR "btrfs: fail to "
				       "dirty  inode %lu error %d\n",
				       inode->i_ino, ret);
			}
4413 4414
		}
	}
C
Chris Mason 已提交
4415 4416 4417
	btrfs_end_transaction(trans, root);
}

/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
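/*
 * Example (added for clarity, not in the original): if the highest
 * DIR_INDEX item of a directory has offset 57, index_cnt becomes 58; an
 * empty directory starts at 2 because f_pos 0 and 1 are reserved for
 * "." and "..".
 */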
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = inode->i_ino;
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != inode->i_ino ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_set_inode_index_count(dir);
		if (ret)
			return ret;
	}

	*index = BTRFS_I(dir)->index_cnt;
	BTRFS_I(dir)->index_cnt++;

	return ret;
}

static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
4497
				     struct inode *dir,
4498
				     const char *name, int name_len,
4499 4500
				     u64 ref_objectid, u64 objectid,
				     u64 alloc_hint, int mode, u64 *index)
C
Chris Mason 已提交
4501 4502
{
	struct inode *inode;
4503
	struct btrfs_inode_item *inode_item;
C
Chris Mason 已提交
4504
	struct btrfs_key *location;
4505
	struct btrfs_path *path;
4506 4507 4508 4509
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
C
Chris Mason 已提交
4510 4511 4512
	int ret;
	int owner;

4513 4514 4515
	path = btrfs_alloc_path();
	BUG_ON(!path);

C
Chris Mason 已提交
4516 4517 4518 4519
	inode = new_inode(root->fs_info->sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

4520
	if (dir) {
4521
		ret = btrfs_set_inode_index(dir, index);
4522 4523
		if (ret) {
			iput(inode);
4524
			return ERR_PTR(ret);
4525
		}
4526 4527 4528 4529 4530 4531 4532
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
C
Chris Mason 已提交
4533
	BTRFS_I(inode)->root = root;
4534
	BTRFS_I(inode)->generation = trans->transid;
4535
	inode->i_generation = BTRFS_I(inode)->generation;
J
Josef Bacik 已提交
4536
	btrfs_set_inode_space_info(root, inode);
4537

C
Chris Mason 已提交
4538 4539 4540 4541
	if (mode & S_IFDIR)
		owner = 0;
	else
		owner = 1;
4542 4543
	BTRFS_I(inode)->block_group =
			btrfs_find_block_group(root, 0, alloc_hint, owner);
4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555

	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

4556
	path->leave_spinning = 1;
4557 4558
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
4559 4560
		goto fail;

4561
	inode_init_owner(inode, dir, mode);
C
Chris Mason 已提交
4562
	inode->i_ino = objectid;
4563
	inode_set_bytes(inode, 0);
C
Chris Mason 已提交
4564
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4565 4566
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_inode_item);
4567
	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4568 4569 4570 4571

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4572
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4573 4574 4575
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

4576 4577 4578
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

C
Chris Mason 已提交
4579 4580 4581 4582 4583
	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

4584 4585
	btrfs_inherit_iflags(inode, dir);

4586 4587 4588 4589 4590 4591 4592
	if ((mode & S_IFREG)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

C
Chris Mason 已提交
4593
	insert_inode_hash(inode);
4594
	inode_tree_add(inode);
C
Chris Mason 已提交
4595
	return inode;
4596
fail:
4597 4598
	if (dir)
		BTRFS_I(dir)->index_cnt--;
4599
	btrfs_free_path(path);
4600
	iput(inode);
4601
	return ERR_PTR(ret);
C
Chris Mason 已提交
4602 4603 4604 4605 4606 4607 4608
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
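/*
 * Illustrative call, mirroring btrfs_mkdir() below (not a new API):
 *
 *	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
 *			     dentry->d_name.len, 0, index);
 *
 * add_backref is 0 for freshly created inodes because btrfs_new_inode()
 * has already inserted the BTRFS_INODE_REF item for them.
 */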
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
C
Chris Mason 已提交
4618
{
4619
	int ret = 0;
C
Chris Mason 已提交
4620
	struct btrfs_key key;
4621
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4622

4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640
	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = inode->i_ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_inode->i_ino,
					 index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root,
					     name, name_len, inode->i_ino,
					     parent_inode->i_ino, index);
	}
C
Chris Mason 已提交
4641 4642

	if (ret == 0) {
4643 4644 4645 4646 4647
		ret = btrfs_insert_dir_item(trans, root, name, name_len,
					    parent_inode->i_ino, &key,
					    btrfs_inode_type(inode), index);
		BUG_ON(ret);

4648
		btrfs_i_size_write(parent_inode, parent_inode->i_size +
4649
				   name_len * 2);
4650
		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4651
		ret = btrfs_update_inode(trans, root, parent_inode);
C
Chris Mason 已提交
4652 4653 4654 4655 4656
	}
	return ret;
}

static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4657 4658
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
C
Chris Mason 已提交
4659
{
4660 4661 4662
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
C
Chris Mason 已提交
4663 4664 4665 4666 4667 4668 4669 4670 4671
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	if (err > 0)
		err = -EEXIST;
	return err;
}

J
Josef Bacik 已提交
4672 4673 4674 4675 4676
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
			int mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
4677
	struct inode *inode = NULL;
J
Josef Bacik 已提交
4678 4679 4680
	int err;
	int drop_inode = 0;
	u64 objectid;
4681
	unsigned long nr = 0;
4682
	u64 index = 0;
J
Josef Bacik 已提交
4683 4684 4685 4686

	if (!new_valid_dev(rdev))
		return -EINVAL;

4687 4688 4689 4690
	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;

J
Josef Bacik 已提交
4691 4692 4693 4694 4695
	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
4696 4697 4698
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
4699

J
Josef Bacik 已提交
4700 4701
	btrfs_set_trans_block_group(trans, dir);

4702
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4703
				dentry->d_name.len, dir->i_ino, objectid,
4704
				BTRFS_I(dir)->block_group, mode, &index);
J
Josef Bacik 已提交
4705 4706 4707 4708
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

4709
	err = btrfs_init_inode_security(trans, inode, dir);
J
Josef Bacik 已提交
4710 4711 4712 4713 4714
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

J
Josef Bacik 已提交
4715
	btrfs_set_trans_block_group(trans, inode);
4716
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
J
Josef Bacik 已提交
4717 4718 4719 4720 4721
	if (err)
		drop_inode = 1;
	else {
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
4722
		btrfs_update_inode(trans, root, inode);
J
Josef Bacik 已提交
4723 4724 4725 4726
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
out_unlock:
4727
	nr = trans->blocks_used;
4728
	btrfs_end_transaction_throttle(trans, root);
4729
	btrfs_btree_balance_dirty(root, nr);
J
Josef Bacik 已提交
4730 4731 4732 4733 4734 4735 4736
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}

C
Chris Mason 已提交
4737 4738 4739 4740 4741
static int btrfs_create(struct inode *dir, struct dentry *dentry,
			int mode, struct nameidata *nd)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
4742
	struct inode *inode = NULL;
C
Chris Mason 已提交
4743
	int drop_inode = 0;
4744
	int err;
4745
	unsigned long nr = 0;
C
Chris Mason 已提交
4746
	u64 objectid;
4747
	u64 index = 0;
C
Chris Mason 已提交
4748

4749 4750 4751
	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;
J
Josef Bacik 已提交
4752 4753 4754 4755 4756
	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
4757 4758 4759
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
J
Josef Bacik 已提交
4760

C
Chris Mason 已提交
4761 4762
	btrfs_set_trans_block_group(trans, dir);

4763
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4764 4765
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, mode, &index);
C
Chris Mason 已提交
4766 4767 4768 4769
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

4770
	err = btrfs_init_inode_security(trans, inode, dir);
J
Josef Bacik 已提交
4771 4772 4773 4774 4775
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

C
Chris Mason 已提交
4776
	btrfs_set_trans_block_group(trans, inode);
4777
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
C
Chris Mason 已提交
4778 4779 4780 4781
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
C
Chris Mason 已提交
4782
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
C
Chris Mason 已提交
4783 4784
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
4785
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
C
Chris Mason 已提交
4786 4787 4788 4789
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
out_unlock:
4790
	nr = trans->blocks_used;
4791
	btrfs_end_transaction_throttle(trans, root);
C
Chris Mason 已提交
4792 4793 4794 4795
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
4796
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
4797 4798 4799 4800 4801 4802 4803 4804 4805
	return err;
}

static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = old_dentry->d_inode;
4806
	u64 index;
4807
	unsigned long nr = 0;
C
Chris Mason 已提交
4808 4809 4810 4811 4812 4813
	int err;
	int drop_inode = 0;

	if (inode->i_nlink == 0)
		return -ENOENT;

4814 4815 4816 4817
	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
		return -EPERM;

J
Josef Bacik 已提交
4818
	btrfs_inc_nlink(inode);
4819
	inode->i_ctime = CURRENT_TIME;
J
Josef Bacik 已提交
4820

4821
	err = btrfs_set_inode_index(dir, &index);
4822 4823 4824
	if (err)
		goto fail;

4825
	/*
M
Miao Xie 已提交
4826
	 * 2 items for inode and inode ref
4827
	 * 2 items for dir items
M
Miao Xie 已提交
4828
	 * 1 item for parent inode
4829
	 */
M
Miao Xie 已提交
4830
	trans = btrfs_start_transaction(root, 5);
4831 4832 4833 4834
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto fail;
	}
4835

C
Chris Mason 已提交
4836
	btrfs_set_trans_block_group(trans, dir);
A
Al Viro 已提交
4837
	ihold(inode);
4838

4839
	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
4840

4841
	if (err) {
4842
		drop_inode = 1;
4843
	} else {
4844
		struct dentry *parent = dget_parent(dentry);
4845 4846 4847
		btrfs_update_inode_block_group(trans, dir);
		err = btrfs_update_inode(trans, root, inode);
		BUG_ON(err);
4848 4849
		btrfs_log_new_name(trans, inode, NULL, parent);
		dput(parent);
4850
	}
C
Chris Mason 已提交
4851

4852
	nr = trans->blocks_used;
4853
	btrfs_end_transaction_throttle(trans, root);
4854
fail:
C
Chris Mason 已提交
4855 4856 4857 4858
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
4859
	btrfs_btree_balance_dirty(root, nr);
C
Chris Mason 已提交
4860 4861 4862 4863 4864
	return err;
}

static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
4865
	struct inode *inode = NULL;
C
Chris Mason 已提交
4866 4867 4868 4869
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
4870
	u64 objectid = 0;
4871
	u64 index = 0;
4872
	unsigned long nr = 1;
C
Chris Mason 已提交
4873

4874 4875 4876 4877
	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;

J
Josef Bacik 已提交
4878 4879 4880 4881 4882
	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
4883 4884 4885
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
J
Josef Bacik 已提交
4886
	btrfs_set_trans_block_group(trans, dir);
C
Chris Mason 已提交
4887

4888
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4889
				dentry->d_name.len, dir->i_ino, objectid,
4890 4891
				BTRFS_I(dir)->block_group, S_IFDIR | mode,
				&index);
C
Chris Mason 已提交
4892 4893 4894 4895
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}
4896

C
Chris Mason 已提交
4897
	drop_on_err = 1;
J
Josef Bacik 已提交
4898

4899
	err = btrfs_init_inode_security(trans, inode, dir);
J
Josef Bacik 已提交
4900 4901 4902
	if (err)
		goto out_fail;

C
Chris Mason 已提交
4903 4904 4905 4906
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	btrfs_set_trans_block_group(trans, inode);

4907
	btrfs_i_size_write(inode, 0);
C
Chris Mason 已提交
4908 4909 4910
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;
4911

4912 4913
	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
C
Chris Mason 已提交
4914 4915
	if (err)
		goto out_fail;
4916

C
Chris Mason 已提交
4917 4918 4919 4920 4921 4922
	d_instantiate(dentry, inode);
	drop_on_err = 0;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);

out_fail:
4923
	nr = trans->blocks_used;
4924
	btrfs_end_transaction_throttle(trans, root);
C
Chris Mason 已提交
4925 4926
	if (drop_on_err)
		iput(inode);
4927
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
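/*
 * Note (added for clarity, not in the original): only the part of [em]
 * inside the caller's [map_start, map_start + map_len) window is kept;
 * em->start, em->len and, for uncompressed extents, em->block_start and
 * em->block_len are trimmed before the mapping is inserted.
 */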
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em);
}

static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		unsigned long copy_size = min_t(u64,
				  PAGE_CACHE_SIZE - pg_offset,
				  max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	kfree(tmp);
	return 0;
}

/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */

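/*
 * Note (added for clarity, not in the original): callers may pass a NULL
 * page when they only need the mapping, e.g. btrfs_cont_expand() above
 * probes for holes with btrfs_get_extent(inode, NULL, 0, offset, len, 0).
 */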
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5002
				    size_t pg_offset, u64 start, u64 len,
5003 5004 5005 5006
				    int create)
{
	int ret;
	int err = 0;
5007
	u64 bytenr;
5008 5009 5010 5011
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = inode->i_ino;
	u32 found_type;
5012
	struct btrfs_path *path = NULL;
5013 5014
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
5015 5016
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
5017 5018
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5019
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5020
	struct btrfs_trans_handle *trans = NULL;
5021
	int compress_type;
5022 5023

again:
5024
	read_lock(&em_tree->lock);
5025
	em = lookup_extent_mapping(em_tree, start, len);
5026 5027
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
5028
	read_unlock(&em_tree->lock);
5029

5030
	if (em) {
5031 5032 5033
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
5034 5035 5036
			free_extent_map(em);
		else
			goto out;
5037
	}
5038
	em = alloc_extent_map(GFP_NOFS);
5039
	if (!em) {
5040 5041
		err = -ENOMEM;
		goto out;
5042
	}
5043
	em->bdev = root->fs_info->fs_devices->latest_bdev;
5044
	em->start = EXTENT_MAP_HOLE;
5045
	em->orig_start = EXTENT_MAP_HOLE;
5046
	em->len = (u64)-1;
C
Chris Mason 已提交
5047
	em->block_len = (u64)-1;
5048 5049 5050 5051 5052 5053

	if (!path) {
		path = btrfs_alloc_path();
		BUG_ON(!path);
	}

5054 5055
	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

5067 5068
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
5069 5070
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
5071 5072 5073
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
5074 5075 5076 5077
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

5078 5079
	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
5080
	compress_type = btrfs_file_extent_compression(leaf, item);
Y
Yan Zheng 已提交
5081 5082
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
									 item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		em->orig_start = EXTENT_MAP_INLINE;
		if (compress_type) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret);
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			WARN_ON(1);
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;
				btrfs_release_path(root, path);
				trans = btrfs_join_transaction(root, 1);
				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, GFP_NOFS);
		goto insert;
	} else {
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
		WARN_ON(1);
	}
not_found:
	em->start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(root, path);
	if (em->start > start || extent_map_end(em) <= start) {
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:
	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

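/*
 * fiemap helper: same as btrfs_get_extent(), but when the mapping comes back
 * as a hole we also check the io_tree for delalloc bytes behind it, so data
 * that is still only in the page cache gets reported as well.
 */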
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to a hole, there might
		 * actually be delalloc bytes behind it
		 */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, let's look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start, range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map(GFP_NOFS);
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until  the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

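/*
 * O_DIRECT write helper: reserve a new data extent for this range, insert a
 * pinned extent_map for it and queue the matching ordered extent.
 */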
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	btrfs_drop_extent_cache(inode, start, start + len - 1, 0);

	trans = btrfs_join_transaction(root, 0);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, (u64)-1, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}

/*
 * returns 1 when the nocow is safe, < 1 on error, 0 if the
 * block must be cow'd
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != inode->i_ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

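/*
 * get_block callback used by __blockdev_direct_IO.  Reuses an existing
 * extent when nocow is safe, otherwise allocates a new one through
 * btrfs_new_extent_direct(), and fills in the buffer_head for the dio code.
 */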
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1, GFP_NOFS);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = em->len - (start - em->start);
		goto map;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		int ret;
		u64 block_start;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		/*
		 * we're not going to log anything, but we do need
		 * to make sure the current transaction stays open
		 * while we look for nocow cross refs
		 */
		trans = btrfs_join_transaction(root, 0);
		if (IS_ERR(trans))
			goto must_cow;

		if (can_nocow_odirect(trans, inode, start, len) == 1) {
			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			btrfs_end_transaction(trans, root);
			if (ret) {
				free_extent_map(em);
				return ret;
			}
			goto unlock;
		}
		btrfs_end_transaction(trans, root);
	}
must_cow:
	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	free_extent_map(em);
	em = btrfs_new_extent_direct(inode, start, len);
	if (IS_ERR(em))
		return PTR_ERR(em);
	len = min(len, em->len - (start - em->start));
unlock:
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
			  0, NULL, GFP_NOFS);
map:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		set_buffer_new(bh_result);

	free_extent_map(em);

	return 0;
}

struct btrfs_dio_private {
	struct inode *inode;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;
	u32 *csums;
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	struct bio *orig_bio;
};

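/*
 * completion handler for direct reads: verify the checksums, unlock the file
 * range and hand the bio back to the dio layer via dio_end_io().
 */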
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start;
	u32 *private = dip->csums;

	start = dip->logical_offset;
	do {
		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			struct page *page = bvec->bv_page;
			char *kaddr;
			u32 csum = ~(u32)0;
			unsigned long flags;

			local_irq_save(flags);
			kaddr = kmap_atomic(page, KM_IRQ0);
			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
					       csum, bvec->bv_len);
			btrfs_csum_final(csum, (char *)&csum);
			kunmap_atomic(kaddr, KM_IRQ0);
			local_irq_restore(flags);

			flush_dcache_page(bvec->bv_page);
			if (csum != *private) {
				printk(KERN_ERR "btrfs csum failed ino %lu off"
				      " %llu csum %u private %u\n",
				      inode->i_ino, (unsigned long long)start,
				      csum, *private);
				err = -EIO;
			}
		}

		start += bvec->bv_len;
		private++;
		bvec++;
	} while (bvec <= bvec_end);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);
	dio_end_io(bio, err);
}

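/*
 * completion handler for direct writes: finish the ordered extents covering
 * the range (insert file extent items or mark prealloc extents written) and
 * update the on-disk i_size before completing the dio.
 */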
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
	if (!ret)
		goto out_test;

	BUG_ON(!ordered);

	trans = btrfs_join_transaction(root, 1);
	if (IS_ERR(trans)) {
		err = -ENOMEM;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			ret = btrfs_update_inode(trans, root, inode);
		err = ret;
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state, GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	btrfs_ordered_update_i_size(inode, 0, ordered);
	btrfs_update_inode(trans, root, inode);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	btrfs_end_transaction(trans, root);
	ordered_offset = ordered->file_offset + ordered->len;
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);

out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		goto again;
	}
out_done:
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);
	dio_end_io(bio, err);
}

static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags, u64 offset)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
	BUG_ON(ret);
	return 0;
}

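/*
 * per split-bio completion: record any error and, once the last pending bio
 * of this dio finishes, end the original bio.
 */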
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
		      "sector %#Lx len %u err no %d\n",
		      dip->inode->i_ino, bio->bi_rw,
		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before atomic variable goto zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	bio_put(bio);
}

static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	int nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}

static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
					 u32 *csums)
{
	int write = rw & REQ_WRITE;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	bio_get(bio);
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	if (ret)
		goto err;

	if (write && !skip_sum) {
		ret = btrfs_wq_submit_bio(root->fs_info,
				   inode, rw, bio, 0, 0,
				   file_offset,
				   __btrfs_submit_bio_start_direct_io,
				   __btrfs_submit_bio_done);
		goto err;
	} else if (!skip_sum)
		btrfs_lookup_bio_sums_dio(root, inode, bio,
					  file_offset, csums);

	ret = btrfs_map_bio(root, rw, bio, 0, 1);
err:
	bio_put(bio);
	return ret;
}

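/*
 * split the original dio bio so that no piece crosses a chunk boundary as
 * reported by btrfs_map_block(), submitting each piece through
 * __btrfs_submit_dio_bio().
 */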
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;
	u64 map_length;
	int nr_pages = 0;
	u32 *csums = dip->csums;
	int ret = 0;

	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		bio_put(bio);
		return -EIO;
	}

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			if (!skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages++;
			bvec++;
		}
	}

	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before atomic variable goto zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}

static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}
	dip->csums = NULL;

	if (!skip_sum) {
		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
		if (!dip->csums) {
			kfree(dip);
			ret = -ENOMEM;
			goto free_ordered;
		}
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}

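/*
 * returns -EINVAL when the request is not aligned to the fs sector size
 * (file offset as well as every iovec base and length); the caller then
 * falls back to buffered I/O.
 */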
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned blocksize_mask = root->sectorsize - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;
	}
	retval = 0;
out:
	return retval;
}
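
/*
 * entry point for O_DIRECT: lock the range, wait out any ordered extents,
 * tag delalloc for writes, then hand off to __blockdev_direct_IO() with our
 * get_blocks and submit hooks.
 */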
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	ssize_t ret;
	int writing = rw & WRITE;
	int write_bits = 0;
	size_t count = iov_length(iov, nr_segs);

	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
			    offset, nr_segs)) {
		return 0;
	}

	lockstart = offset;
	lockend = offset + count - 1;

	if (writing) {
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	}

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state, GFP_NOFS);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}

	/*
	 * we don't use btrfs_set_extent_delalloc because we don't want
	 * the dirty or uptodate bits
	 */
	if (writing) {
		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     EXTENT_DELALLOC, 0, NULL, &cached_state,
				     GFP_NOFS);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
					 lockend, EXTENT_LOCKED | write_bits,
					 1, 0, &cached_state, GFP_NOFS);
			goto out;
		}
	}

	free_extent_state(cached_state);
	cached_state = NULL;

	ret = __blockdev_direct_IO(rw, iocb, inode,
		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
		   btrfs_submit_direct, 0);

	if (ret < 0 && ret != -EIOCBQUEUED) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
		/*
		 * We're falling back to buffered, unlock the section we didn't
		 * do IO on.
		 */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	}
out:
	free_extent_state(cached_state);
	return ret;
}

static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}

int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent);
}

static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;


	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}

int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}

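/*
 * the page is going away.  If an ordered extent is still outstanding for it,
 * do the ordered accounting here since the IO will never be started, then
 * drop our private state for the page.
 */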
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;


	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					   page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 &cached_state, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		cached_state = NULL;
		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
				 GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	u64 page_start;
	u64 page_end;

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
	 * clear any delalloc bits for the range we are fixing to save.  There
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out:
	return ret;
}

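/*
 * truncate the items for this inode down to the new i_size, retrying on
 * -EAGAIN and removing the orphan item once it completes for a still-linked
 * inode.
 */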
static void btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;

	if (!S_ISREG(inode->i_mode)) {
		WARN_ON(1);
		return;
	}

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = root->orphan_block_rsv;

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		if (!trans) {
			trans = btrfs_start_transaction(root, 0);
			BUG_ON(IS_ERR(trans));
			btrfs_set_trans_block_group(trans, inode);
			trans->block_rsv = root->orphan_block_rsv;
		}

		ret = btrfs_block_rsv_check(trans, root,
					    root->orphan_block_rsv, 0, 5);
		if (ret) {
			BUG_ON(ret != -EAGAIN);
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
			trans = NULL;
			continue;
		}

		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}

	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);

	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	BUG_ON(ret);
	btrfs_btree_balance_dirty(root, nr);
}

/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     u64 new_dirid, u64 alloc_hint)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	inode->i_nlink = 1;
	btrfs_i_size_write(inode, 0);

	err = btrfs_update_inode(trans, new_root, inode);
	BUG_ON(err);

	iput(inode);
	return 0;
}

/* helper function for file defrag and space balancing.  This
 * forces readahead on a given range of bytes in an inode
 */
unsigned long btrfs_force_ra(struct address_space *mapping,
			      struct file_ra_state *ra, struct file *file,
			      pgoff_t offset, pgoff_t last_index)
{
	pgoff_t req_size = last_index - offset + 1;

	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
	return offset + req_size;
}

struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->space_info = NULL;
	ei->generation = 0;
	ei->sequence = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->reserved_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->index_cnt = (u64)-1;
	ei->last_unlink_trans = 0;

	atomic_set(&ei->outstanding_extents, 0);
	atomic_set(&ei->reserved_extents, 0);

	ei->ordered_data_close = 0;
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
	extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}

static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
	WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	if (root == root->fs_info->tree_root) {
		struct btrfs_block_group_cache *block_group;

		block_group = btrfs_lookup_block_group(root->fs_info,
						BTRFS_I(inode)->block_group);
		if (block_group && block_group->inode == inode) {
			spin_lock(&block_group->lock);
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			btrfs_put_block_group(block_group);
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
		       inode->i_ino);
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->orphan_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}

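/*
 * don't keep inodes cached once their subvolume root has been deleted
 * (root_refs == 0); otherwise fall back to the generic behaviour.
 */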
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root)
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
}

int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}

static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
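	/*
	 * i_blocks is reported in 512-byte units.  Delalloc bytes that are
	 * still only in memory are included so that freshly written data
	 * shows up in stat() before it has been flushed to disk.
	 */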
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}

static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;

	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * We're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction.
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
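	/*
	 * Note that the math above works out to 11 items while we start the
	 * transaction with room for 20 below; the extra is presumably just
	 * headroom, since over-reserving only costs a larger temporary
	 * metadata reservation.
	 */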
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, new_dir);

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_inode->i_ino,
					     new_dir->i_ino, index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size &&
	    old_inode && S_ISREG(old_inode->i_mode)) {
		btrfs_add_ordered_operation(trans, root, old_inode);
	}

	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
		btrfs_inc_nlink(old_dentry->d_inode);
		ret = btrfs_unlink_inode(trans, root, old_dir,
					 old_dentry->d_inode,
					 old_dentry->d_name.name,
					 old_dentry->d_name.len);
	}
	BUG_ON(ret);

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(new_inode->i_ino ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		BUG_ON(ret);
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
	}

	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	BUG_ON(ret);

	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = dget_parent(new_dentry);
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		dput(parent);
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction_throttle(trans, root);

	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}

/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
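		/*
		 * Drop the lock before flushing; filemap_flush() can
		 * block, and the list may change while we sleep.
		 */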
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	      atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}

int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
				   int sync)
{
	struct btrfs_inode *binode;
	struct inode *inode = NULL;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(&root->fs_info->delalloc_inodes)) {
		binode = list_entry(root->fs_info->delalloc_inodes.next,
				    struct btrfs_inode, delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (inode) {
			list_move_tail(&binode->delalloc_inodes,
				       &root->fs_info->delalloc_inodes);
			break;
		}

		list_del_init(&binode->delalloc_inodes);
		cond_resched_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	if (inode) {
		if (sync) {
			filemap_write_and_wait(inode->i_mapping);
			/*
			 * We have to do this because compression doesn't
			 * actually set PG_writeback until it submits the pages
			 * for IO, which happens in an async thread, so we could
			 * race and not actually wait for any writeback pages
			 * because they've not been submitted yet.  Technically
			 * this could still be the case for the ordered stuff
			 * since the async thread may not have started to do its
			 * work yet.  If this becomes the case then we need to
			 * figure out a way to make sure that in writepage we
			 * wait for any async pages to be submitted before
			 * returning so that fdatawait does what it's supposed to
			 * do.
			 */
			btrfs_wait_ordered_range(inode, 0, (u64)-1);
		} else {
			filemap_flush(inode->i_mapping);
		}
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);
		return 1;
	}
	return 0;
}

static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;
	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
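	/*
	 * The symlink target is stored as an inline file extent, so
	 * build that item by hand here.
	 */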
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
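	/*
	 * If the caller handed us a transaction we do the whole
	 * preallocation inside it; otherwise start and end a small
	 * transaction per extent so we never hold one open across a
	 * potentially huge range.
	 */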
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, (u64)-1, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset -1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
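		/*
		 * Unless the caller asked to keep the file size
		 * (FALLOC_FL_KEEP_SIZE), push i_size forward as the
		 * preallocated range grows past the old EOF.
		 */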
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}

static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
{
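	/*
	 * Refuse write access both to read-only subvolumes and to inodes
	 * carrying the btrfs read-only flag; everything else falls
	 * through to the generic POSIX/ACL checks.
	 */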
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_readonly(root) && (mask & MAY_WRITE))
		return -EROFS;
	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, flags, btrfs_check_acl);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release        = btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr      = btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};