/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

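/*
 * args used when looking up an inode: the inode number and the root
 * (subvolume) the inode belongs to
 */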
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

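/* slab caches for the btrfs objects we allocate most often */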
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

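/*
 * map the S_IFMT bits of i_mode to the btrfs file type values stored
 * in directory items on disk
 */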
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

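/*
 * initialize the ACLs and security xattrs of a newly created inode,
 * based on the directory it is being created in
 */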
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode,  struct inode *dir)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	return 0;
fail:
	btrfs_free_path(path);
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	BUG_ON(ret);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

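/*
 * bookkeeping for the async compression path below: an async_extent
 * describes one range (compressed or not) waiting for IO submission,
 * an async_cow describes the whole delalloc range handed to a worker
 */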
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

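/*
 * queue a range (and its compressed pages, if any) on the async_cow
 * extent list so the ordered work queue can submit it later
 */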
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize,  num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		BUG_ON(!pages);

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		btrfs_set_trans_block_group(trans, inode);
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		btrfs_end_transaction(trans, root);

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map(GFP_NOFS);
		BUG_ON(!em);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}

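/*
 * look at the extent mappings around this range to pick a starting
 * hint for the allocator
 */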
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(root == root->fs_info->tree_root);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize,  num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		BUG_ON(!em);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1042 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1042;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

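/*
 * returns 1 if any checksums exist for the given range of disk bytes,
 * zero if none do.  The nocow path uses this to force COW when csums
 * are present.
 */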
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * when the nocow writeback callback runs.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock = false;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	if (root == root->fs_info->tree_root) {
		nolock = true;
		trans = btrfs_join_transaction_nolock(root, 1);
	} else {
		trans = btrfs_join_transaction(root, 1);
	}
	BUG_ON(IS_ERR(trans));

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			BUG_ON(!em);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			BUG_ON(ret);
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	if (nolock) {
		ret = btrfs_end_transaction_nolock(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress) &&
		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

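/*
 * extent_io.c split_extent_hook, used to account for the extra
 * outstanding extent created when a delalloc extent state is split
 */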
static int btrfs_split_extent_hook(struct inode *inode,
				   struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return 0;

	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
	return 0;
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static int btrfs_merge_extent_hook(struct inode *inode,
				   struct extent_state *new,
				   struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return 0;

	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
	return 0;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode,
			      struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		int do_list = (root->root_key.objectid !=
			       BTRFS_ROOT_TREE_OBJECTID);

		if (*bits & EXTENT_FIRST_DELALLOC)
			*bits &= ~EXTENT_FIRST_DELALLOC;
		else
			atomic_inc(&BTRFS_I(inode)->outstanding_extents);

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode,
				struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		int do_list = (root->root_key.objectid !=
			       BTRFS_ROOT_TREE_OBJECTID);

		if (*bits & EXTENT_FIRST_DELALLOC)
			*bits &= ~EXTENT_FIRST_DELALLOC;
		else if (!(*bits & EXTENT_DO_ACCOUNTING))
			atomic_dec(&BTRFS_I(inode)->outstanding_extents);

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return ret;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (root == root->fs_info->tree_root)
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
	else
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & REQ_WRITE)) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

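/*
 * mark a range of the file delalloc in the io_tree so the writeback
 * paths know space still has to be allocated for it
 */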
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state, GFP_NOFS);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	BUG();
	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}

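/*
 * insert a file extent item for an extent that has already been
 * reserved and written on disk, and record the allocation in the
 * extent tree
 */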
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					inode->i_ino, file_pos, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);

	return 0;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	int compress_type = 0;
	int ret;
	bool nolock = false;

	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					     end - start + 1);
	if (!ret)
		return 0;
	BUG_ON(!ordered_extent);

	nolock = (root == root->fs_info->tree_root);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list));
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			if (nolock)
				trans = btrfs_join_transaction_nolock(root, 1);
			else
				trans = btrfs_join_transaction(root, 1);
			BUG_ON(IS_ERR(trans));
			btrfs_set_trans_block_group(trans, inode);
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode(trans, root, inode);
			BUG_ON(ret);
		}
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state, GFP_NOFS);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root, 1);
	else
		trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);

	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	if (!ret) {
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
	}
	ret = 0;
out:
	if (nolock) {
		if (trans)
			btrfs_end_transaction_nolock(trans, root);
	} else {
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
		if (trans)
			btrfs_end_transaction(trans, root);
	}

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
}
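
/*
 * Rough completion flow, for reference: a data write bio ends, the
 * extent_io code calls btrfs_writepage_end_io_hook() below, and that
 * hands the byte range to btrfs_finish_ordered_io() above, which joins a
 * transaction, inserts or updates the file extent item, adds the pending
 * csums and brings the on-disk i_size up to date.
 */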

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&failrec->bio_flags,
						 em->compress_type);
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				 (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & REQ_WRITE)
		rw = WRITE;
	else
		rw = READ;

	ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						      failrec->last_mirror,
						      failrec->bio_flags, 0);
	return ret;
}
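
/*
 * Read-retry sketch: every failed read of this range bumps
 * failrec->last_mirror and resubmits the page through submit_bio_hook
 * aimed at the next mirror.  Once last_mirror passes the number of
 * copies the record is dropped and -EIO is returned to the original bio.
 */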

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	if (printk_ratelimit()) {
		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
		       "private %llu\n", page->mapping->host->i_ino,
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	}
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	if (private == 0)
		return 0;
	return -EIO;
}

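/*
 * Delayed iputs: if dropping a reference here would be the final iput()
 * of an inode, queue it on fs_info->delayed_iputs instead and let
 * btrfs_run_delayed_iputs() process it later, when it is safe to do the
 * eviction work that a final iput() can trigger.
 */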
struct delayed_iput {
	struct list_head list;
	struct inode *inode;
};

void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}

void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}

/*
 * calculate extra metadata reservation when snapshotting a subvolume
 * contains orphan files.
 */
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending,
				u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;

	root = pending->root;
	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	block_rsv = root->orphan_block_rsv;

	/* orphan block reservation for the snapshot */
	num_bytes = block_rsv->size;

	/*
	 * after the snapshot is created, COWing tree blocks may use more
	 * space than it frees. So we should make sure there is enough
	 * reserved space.
	 */
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes += block_rsv->size -
			     (block_rsv->reserved + block_rsv->freed[index]);
	}

	*bytes_to_reserve += num_bytes;
}

void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *snap = pending->snap;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;
	int ret;

	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	/* refill source subvolume's orphan block reservation */
	block_rsv = root->orphan_block_rsv;
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes = block_rsv->size -
			    (block_rsv->reserved + block_rsv->freed[index]);
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      root->orphan_block_rsv,
					      num_bytes);
		BUG_ON(ret);
	}

	/* setup orphan block reservation for the snapshot */
	block_rsv = btrfs_alloc_block_rsv(snap);
	BUG_ON(!block_rsv);

	btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
	snap->orphan_block_rsv = block_rsv;

	num_bytes = root->orphan_block_rsv->size;
	ret = btrfs_block_rsv_migrate(&pending->block_rsv,
				      block_rsv, num_bytes);
	BUG_ON(ret);

#if 0
	/* insert orphan item for the snapshot */
	WARN_ON(!root->orphan_item_inserted);
	ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
				       snap->root_key.objectid);
	BUG_ON(ret);
	snap->orphan_item_inserted = 1;
#endif
}

enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};

/*
 * This is called at transaction commit time. If there are no orphan
 * files in the subvolume, it removes orphan item and frees block_rsv
 * structure.
 */
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	int ret;

	if (!list_empty(&root->orphan_list) ||
	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	if (root->orphan_item_inserted &&
	    btrfs_root_refs(&root->root_item) > 0) {
		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
					    root->root_key.objectid);
		BUG_ON(ret);
		root->orphan_item_inserted = 0;
	}

	if (root->orphan_block_rsv) {
		WARN_ON(root->orphan_block_rsv->size > 0);
		btrfs_free_block_rsv(root, root->orphan_block_rsv);
		root->orphan_block_rsv = NULL;
	}
}
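
/*
 * Orphan item lifecycle, in short: btrfs_orphan_add() below is called
 * once an inode is unlinked or is being truncated, btrfs_orphan_del()
 * removes the item again when the delete/truncate completes, and
 * btrfs_orphan_cleanup() walks any orphan items left over from a crash
 * and finishes the work.
 */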

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 *
 * NOTE: caller of this function should reserve 5 units of metadata for
 *	 this function.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;

	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(root);
		BUG_ON(!block_rsv);
	}

	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		btrfs_free_block_rsv(root, block_rsv);
		block_rsv = NULL;
	}

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
#endif
		insert = 1;
	}

	if (!BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 1;
		reserve = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (block_rsv)
		btrfs_add_durable_block_rsv(root->fs_info, block_rsv);

	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		BUG_ON(ret);
	}

	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
		BUG_ON(ret);
	}

	/* insert an orphan item to track subvolume contains orphan files */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
					       root->root_key.objectid);
		BUG_ON(ret);
	}
	return 0;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int delete_item = 0;
	int release_rsv = 0;
	int ret = 0;

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_del_init(&BTRFS_I(inode)->i_orphan);
		delete_item = 1;
	}

	if (BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 0;
		release_rsv = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (trans && delete_item) {
		ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
		BUG_ON(ret);
	}

	if (release_rsv)
		btrfs_orphan_release_metadata(inode);

	return 0;
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			goto out;
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->orphan_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->orphan_lock);

		/*
		 * if this is a bad inode, means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, lets do that */
		if (inode->i_nlink) {
			if (!S_ISREG(inode->i_mode)) {
				WARN_ON(1);
				iput(inode);
				continue;
			}
			nr_truncate++;
			ret = btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
		if (ret)
			goto out;
	}
	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
		btrfs_block_rsv_release(root, root->orphan_block_rsv,
					(u64)-1);

	if (root->orphan_block_rsv || root->orphan_item_inserted) {
		trans = btrfs_join_transaction(root, 1);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans, root);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

out:
	if (ret)
		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
	btrfs_free_path(path);
	return ret;
}
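
/*
 * Note: btrfs_orphan_cleanup() is typically run the first time a root is
 * used after mount; the cmpxchg on orphan_cleanup_state above keeps it
 * from running more than once per root.
 */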

/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	int scanned = 0;

	slot++;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
			return 1;

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	return 1;
}

/*
 * read an inode from the btree into the in-memory inode
 */
static void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	int maybe_acls;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);

	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
	if (!maybe_acls)
		cache_no_acl(inode);

	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
						alloc_group_block, 0);
	btrfs_free_path(path);
	inode_item = NULL;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	if (!leaf->map_token)
		map_private_extent_buffer(leaf, (unsigned long)item,
					  sizeof(struct btrfs_inode_item),
					  &leaf->map_token, &leaf->kaddr,
					  &leaf->map_start, &leaf->map_len,
					  KM_USER1);

	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
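
/*
 * Typical caller pattern (see btrfs_finish_ordered_io() and
 * __btrfs_unlink_inode() for examples): update the in-memory fields
 * first, then write them back while holding a transaction:
 *
 *	inode->i_ctime = CURRENT_TIME;
 *	btrfs_i_size_write(inode, new_size);
 *	ret = btrfs_update_inode(trans, root, inode);
 */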


/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It remove a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				    name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  inode->i_ino,
				  dir->i_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       inode->i_ino, dir->i_ino);
		goto err;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir->i_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
out:
	return ret;
}

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret;
	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		btrfs_drop_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	}
	return ret;
}
		

/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
			     struct btrfs_path *path)
{
2778 2779
	struct extent_buffer *eb;
	int level;
2780
	u64 refs = 1;
2781

2782
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2783 2784
		int ret;

2785 2786 2787 2788 2789 2790 2791 2792 2793
		if (!path->nodes[level])
			break;
		eb = path->nodes[level];
		if (!btrfs_block_can_be_shared(root, eb))
			continue;
		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
					       &refs, NULL);
		if (refs > 1)
			return 1;
2794
	}
2795
	return 0;
}

2798 2799 2800 2801 2802 2803 2804 2805 2806
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space.
 * so in enospc case, we should make sure they will free space before
 * allowing them to use the global metadata reservation.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
						       struct dentry *dentry)
2807
{
	struct btrfs_trans_handle *trans;
2809
	struct btrfs_root *root = BTRFS_I(dir)->root;
2810
	struct btrfs_path *path;
2811
	struct btrfs_inode_ref *ref;
2812
	struct btrfs_dir_item *di;
2813
	struct inode *inode = dentry->d_inode;
2814
	u64 index;
2815 2816
	int check_link = 1;
	int err = -ENOSPC;
2817 2818
	int ret;

2819 2820 2821
	trans = btrfs_start_transaction(root, 10);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;
2822

2823 2824
	if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return ERR_PTR(-ENOSPC);
2825

2826 2827 2828
	/* check if there is someone else holds reference */
	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
		return ERR_PTR(-ENOSPC);
2829

2830 2831
	if (atomic_read(&inode->i_count) > 2)
		return ERR_PTR(-ENOSPC);
2832

2833 2834 2835 2836 2837 2838 2839
	if (xchg(&root->fs_info->enospc_unlink, 1))
		return ERR_PTR(-ENOSPC);

	path = btrfs_alloc_path();
	if (!path) {
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(-ENOMEM);
2840 2841
	}

2842
	trans = btrfs_start_transaction(root, 0);
2843
	if (IS_ERR(trans)) {
2844 2845 2846 2847
		btrfs_free_path(path);
		root->fs_info->enospc_unlink = 0;
		return trans;
	}
2848

2849 2850
	path->skip_locking = 1;
	path->search_commit_root = 1;
2851

2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862
	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(dir)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
2863
	}
	btrfs_release_path(root, path);

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(inode)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(root, path);

	if (ret == 0 && S_ISREG(inode->i_mode)) {
		ret = btrfs_lookup_file_extent(trans, root, path,
					       inode->i_ino, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		BUG_ON(ret == 0);
		if (check_path_shared(root, path))
			goto out;
		btrfs_release_path(root, path);
	}

	if (!check_link) {
		err = 0;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	if (di) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		err = 0;
		goto out;
	}
	btrfs_release_path(root, path);

	ref = btrfs_lookup_inode_ref(trans, root, path,
				dentry->d_name.name, dentry->d_name.len,
				inode->i_ino, dir->i_ino, 0);
	if (IS_ERR(ref)) {
		err = PTR_ERR(ref);
		goto out;
	}
	BUG_ON(!ref);
	if (check_path_shared(root, path))
		goto out;
	index = btrfs_inode_ref_index(path->nodes[0], ref);
	btrfs_release_path(root, path);

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	BUG_ON(ret == -ENOENT);
	if (check_path_shared(root, path))
		goto out;

	err = 0;
out:
	btrfs_free_path(path);
	if (err) {
		btrfs_end_transaction(trans, root);
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(err);
	}

	trans->block_rsv = &root->fs_info->global_block_rsv;
	return trans;
}

static void __unlink_end_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
		BUG_ON(!root->fs_info->enospc_unlink);
		root->fs_info->enospc_unlink = 0;
	}
	btrfs_end_transaction_throttle(trans, root);
}
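
/*
 * btrfs_unlink() and btrfs_rmdir() below pair __unlink_start_trans() with
 * __unlink_end_trans() so the ENOSPC bookkeeping above is always undone,
 * no matter which error path is taken.
 */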

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
2970

	btrfs_set_trans_block_group(trans, dir);
2972 2973 2974

	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

2975 2976
	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
2977
	BUG_ON(ret);
2978

2979
	if (inode->i_nlink == 0) {
2980
		ret = btrfs_orphan_add(trans, inode);
2981 2982
		BUG_ON(ret);
	}
2983

2984
	nr = trans->blocks_used;
2985
	__unlink_end_trans(trans, root);
2986
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}

int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	BUG_ON(!di || IS_ERR(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir->i_ino, &index, name, name_len);
	if (ret < 0) {
		BUG_ON(ret != -ENOENT);
		di = btrfs_search_dir_index_item(root, path, dir->i_ino,
						 name, name_len);
		BUG_ON(!di || IS_ERR(di));

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(root, path);
		index = key.offset;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	BUG_ON(!di || IS_ERR(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
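
/*
 * btrfs_unlink_subvol() removes the directory entries that point at a
 * subvolume root rather than at a regular inode; btrfs_rmdir() below uses
 * it for the empty placeholder directory that stands in for a subvolume
 * (BTRFS_EMPTY_SUBVOL_DIR_OBJECTID).
 */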

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
3055
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
3058
	unsigned long nr = 0;

3060
	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
3061
	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		return -ENOTEMPTY;

3064 3065
	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
3066 3067
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

3070 3071 3072 3073 3074 3075 3076 3077
	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

3078 3079
	err = btrfs_orphan_add(trans, inode);
	if (err)
3080
		goto out;
3081

	/* now the directory is empty */
3083 3084
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
3086
		btrfs_i_size_write(inode, 0);
3087
out:
3088
	nr = trans->blocks_used;
3089
	__unlink_end_trans(trans, root);
3090
	btrfs_btree_balance_dirty(root, nr);
3091

	return err;
}
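
/*
 * The drop_csum_leaves() helper between the #if 0/#endif below is kept
 * only for reference; it is not compiled in.
 */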

#if 0
/*
 * when truncating bytes in a file, it is possible to avoid reading
 * the leaves that contain only checksum items.  This can be the
 * majority of the IO required to delete a large file, but it must
 * be done carefully.
 *
 * The keys in the level just above the leaves are checked to make sure
 * the lowest key in a given leaf is a csum key, and starts at an offset
 * after the new  size.
 *
 * Then the key for the next leaf is checked to make sure it also has
 * a checksum item for the same file.  If it does, we know our target leaf
 * contains only checksum items, and it can be safely freed without reading
 * it.
 *
 * This is just an optimization targeted at large files.  It may do
 * nothing.  It will return 0 unless things went badly.
 */
static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct inode *inode, u64 new_size)
{
	struct btrfs_key key;
	int ret;
	int nritems;
	struct btrfs_key found_key;
	struct btrfs_key other_key;
	struct btrfs_leaf_ref *ref;
	u64 leaf_gen;
	u64 leaf_start;

	path->lowest_level = 1;
	key.objectid = inode->i_ino;
	key.type = BTRFS_CSUM_ITEM_KEY;
	key.offset = new_size;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (path->nodes[1] == NULL) {
		ret = 0;
		goto out;
	}
	ret = 0;
	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
	nritems = btrfs_header_nritems(path->nodes[1]);

	if (!nritems)
		goto out;

	if (path->slots[1] >= nritems)
		goto next_node;

	/* did we find a key greater than anything we want to delete? */
	if (found_key.objectid > inode->i_ino ||
	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
		goto out;

	/* we check the next key in the node to make sure the leave contains
	 * only checksum items.  This comparison doesn't work if our
	 * leaf is the last one in the node
	 */
	if (path->slots[1] + 1 >= nritems) {
next_node:
		/* search forward from the last key in the node, this
		 * will bring us into the next node in the tree
		 */
		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);

		/* unlikely, but we inc below, so check to be safe */
		if (found_key.offset == (u64)-1)
			goto out;

		/* search_forward needs a path with locks held, do the
		 * search again for the original key.  It is possible
		 * this will race with a balance and return a path that
		 * we could modify, but this drop is just an optimization
		 * and is allowed to miss some leaves.
		 */
		btrfs_release_path(root, path);
		found_key.offset++;

		/* setup a max key for search_forward */
		other_key.offset = (u64)-1;
		other_key.type = key.type;
		other_key.objectid = key.objectid;

		path->keep_locks = 1;
		ret = btrfs_search_forward(root, &found_key, &other_key,
					   path, 0, 0);
		path->keep_locks = 0;
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		key.offset = found_key.offset;
		btrfs_release_path(root, path);
		cond_resched();
		goto again;
	}

	/* we know there's one more slot after us in the tree,
	 * read that key so we can verify it is also a checksum item
	 */
	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

	if (found_key.objectid < inode->i_ino)
		goto next_key;

	if (found_key.type != key.type || found_key.offset < new_size)
		goto next_key;

	/*
	 * if the key for the next leaf isn't a csum key from this objectid,
	 * we can't be sure there aren't good items inside this leaf.
	 * Bail out
	 */
	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
		goto out;

Y
Yan Zheng 已提交
3220 3221
	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
3222 3223 3224 3225
	/*
	 * it is safe to delete this leaf, it contains only
	 * csum items from this inode at an offset >= new_size
	 */
Y
Yan Zheng 已提交
3226
	ret = btrfs_del_leaf(trans, root, path, leaf_start);
3227 3228
	BUG_ON(ret);

Y
Yan Zheng 已提交
3229 3230 3231 3232 3233 3234 3235 3236 3237
	if (root->ref_cows && leaf_gen < trans->transid) {
		ref = btrfs_alloc_leaf_ref(root, 0);
		if (ref) {
			ref->root_gen = root->root_key.offset;
			ref->bytenr = leaf_start;
			ref->owner = 0;
			ref->generation = leaf_gen;
			ref->nritems = 0;

3238 3239
			btrfs_sort_leaf_ref(ref);

Y
Yan Zheng 已提交
3240 3241 3242 3243 3244 3245 3246
			ret = btrfs_add_leaf_ref(root, ref, 0);
			WARN_ON(ret);
			btrfs_free_leaf_ref(root, ref);
		} else {
			WARN_ON(1);
		}
	}
3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264
next_key:
	btrfs_release_path(root, path);

	if (other_key.objectid == inode->i_ino &&
	    other_key.type == key.type && other_key.offset > key.offset) {
		key.offset = other_key.offset;
		cond_resched();
		goto again;
	}
	ret = 0;
out:
	/* fixup any changes we've made to the path */
	path->lowest_level = 0;
	path->keep_locks = 0;
	btrfs_release_path(root, path);
	return ret;
}

3265 3266
#endif

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
3274 3275 3276
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
3278 3279 3280 3281
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
3284
	struct extent_buffer *leaf;
C
Chris Mason 已提交
3285
	struct btrfs_file_extent_item *fi;
3286 3287
	struct btrfs_key key;
	struct btrfs_key found_key;
C
Chris Mason 已提交
3288
	u64 extent_start = 0;
3289
	u64 extent_num_bytes = 0;
3290
	u64 extent_offset = 0;
C
Chris Mason 已提交
3291
	u64 item_end = 0;
3292 3293
	u64 mask = root->sectorsize - 1;
	u32 found_type = (u8)-1;
C
Chris Mason 已提交
3294 3295
	int found_extent;
	int del_item;
3296 3297
	int pending_del_nr = 0;
	int pending_del_slot = 0;
3298
	int extent_type = -1;
3299
	int encoding;
3300 3301 3302 3303
	int ret;
	int err = 0;

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
C
Chris Mason 已提交
3304

3305
	if (root->ref_cows || root == root->fs_info->tree_root)
3306
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3307

C
Chris Mason 已提交
3308 3309
	path = btrfs_alloc_path();
	BUG_ON(!path);
J
Julia Lawall 已提交
3310
	path->reada = -1;
3311

C
Chris Mason 已提交
3312 3313
	key.objectid = inode->i_ino;
	key.offset = (u64)-1;
3314 3315
	key.type = (u8)-1;

3316
search_again:
3317
	path->leave_spinning = 1;
3318
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3319 3320 3321 3322
	if (ret < 0) {
		err = ret;
		goto out;
	}
C
Chris Mason 已提交
3323

3324
	if (ret > 0) {
3325 3326 3327
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
3328 3329
		if (path->slots[0] == 0)
			goto out;
3330 3331 3332
		path->slots[0]--;
	}

C
Chris Mason 已提交
3333
	while (1) {
C
Chris Mason 已提交
3334
		fi = NULL;
3335 3336 3337
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);
3338
		encoding = 0;
C
Chris Mason 已提交
3339

3340
		if (found_key.objectid != inode->i_ino)
C
Chris Mason 已提交
3341
			break;
3342

3343
		if (found_type < min_type)
C
Chris Mason 已提交
3344 3345
			break;

3346
		item_end = found_key.offset;
C
Chris Mason 已提交
3347
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3348
			fi = btrfs_item_ptr(leaf, path->slots[0],
C
Chris Mason 已提交
3349
					    struct btrfs_file_extent_item);
3350
			extent_type = btrfs_file_extent_type(leaf, fi);
3351 3352 3353 3354
			encoding = btrfs_file_extent_compression(leaf, fi);
			encoding |= btrfs_file_extent_encryption(leaf, fi);
			encoding |= btrfs_file_extent_other_encoding(leaf, fi);

3355
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3356
				item_end +=
3357
				    btrfs_file_extent_num_bytes(leaf, fi);
3358 3359
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
C
Chris Mason 已提交
3360
									 fi);
C
Chris Mason 已提交
3361
			}
3362
			item_end--;
C
Chris Mason 已提交
3363
		}
3364 3365 3366 3367
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
3368
				break;
3369 3370 3371 3372
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
C
Chris Mason 已提交
3373 3374 3375
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
3376 3377 3378 3379
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
C
Chris Mason 已提交
3380
			u64 num_dec;
3381
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3382
			if (!del_item && !encoding) {
3383 3384
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
3385
				extent_num_bytes = new_size -
3386
					found_key.offset + root->sectorsize - 1;
3387 3388
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
3389 3390 3391
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
C
Chris Mason 已提交
3392
					   extent_num_bytes);
3393
				if (root->ref_cows && extent_start != 0)
3394
					inode_sub_bytes(inode, num_dec);
3395
				btrfs_mark_buffer_dirty(leaf);
C
Chris Mason 已提交
3396
			} else {
3397 3398 3399
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
3400 3401 3402
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

C
Chris Mason 已提交
3403
				/* FIXME blocksize != 4096 */
C
Chris Mason 已提交
3404
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
C
Chris Mason 已提交
3405 3406
				if (extent_start != 0) {
					found_extent = 1;
3407
					if (root->ref_cows)
3408
						inode_sub_bytes(inode, num_dec);
3409
				}
C
Chris Mason 已提交
3410
			}
C
Chris Mason 已提交
3411
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
C
Chris Mason 已提交
3412 3413 3414 3415 3416 3417 3418 3419
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3420 3421 3422
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
3423 3424
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
3425 3426 3427
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
C
Chris Mason 已提交
3428
				ret = btrfs_truncate_item(trans, root, path,
3429
							  size, 1);
C
Chris Mason 已提交
3430
				BUG_ON(ret);
3431
			} else if (root->ref_cows) {
3432 3433
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
C
Chris Mason 已提交
3434
			}
C
Chris Mason 已提交
3435
		}
3436
delete:
C
Chris Mason 已提交
3437
		if (del_item) {
3438 3439 3440 3441 3442 3443 3444 3445 3446 3447
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						inode->i_ino, extent_offset);
			BUG_ON(ret);
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			if (root->ref_cows) {
				err = -EAGAIN;
				goto out;
			}
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				BUG_ON(ret);
				pending_del_nr = 0;
			}
			btrfs_release_path(root, path);
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return err;
}

/*
 * taken from block_truncate_page, but does cow as it zeros out
 * any bytes left in the last page in the file.
 */
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	int ret = 0;
	u64 page_start;
	u64 page_end;

	if ((offset & (blocksize - 1)) == 0)
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

	ret = -ENOMEM;
again:
	page = grab_cache_page(mapping, index);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	ret = 0;
	if (offset != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 mask = root->sectorsize - 1;
	u64 hole_start = (oldsize + mask) & ~mask;
	u64 block_end = (size + mask) & ~mask;
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				block_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			u64 hint_byte = 0;
			hole_size = last_byte - cur_offset;

			trans = btrfs_start_transaction(root, 2);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				break;
			}
			btrfs_set_trans_block_group(trans, inode);

			err = btrfs_drop_extents(trans, inode, cur_offset,
						 cur_offset + hole_size,
						 &hint_byte, 1);
			if (err)
				break;

			err = btrfs_insert_file_extent(trans, root,
					inode->i_ino, cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			if (err)
				break;

			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);

			btrfs_end_transaction(trans, root);
		}
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}

	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}
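
/*
 * adjust i_size for a truncate or an extend.  Growing the file goes through
 * btrfs_cont_expand() so the new range is backed by hole extents; shrinking
 * drops the pagecache beyond the new size and leaves the real work to
 * btrfs_truncate().
 */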
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);
	int ret;

	if (newsize == oldsize)
		return 0;

	if (newsize > oldsize) {
		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		truncate_pagecache(inode, oldsize, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret) {
			btrfs_setsize(inode, oldsize);
			return ret;
		}

		mark_inode_dirty(inode);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			BTRFS_I(inode)->ordered_data_close = 1;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);
		ret = btrfs_truncate(inode);
	}

	return ret;
}

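/*
 * ->setattr: size changes are routed through btrfs_setsize(), everything
 * else is copied into the in-memory inode; a mode change also refreshes
 * the ACLs via btrfs_acl_chmod().
 */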
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr->ia_size);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);

		if (attr->ia_valid & ATTR_MODE)
			err = btrfs_acl_chmod(inode);
	}

	return err;
}
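
/*
 * called when an inode is evicted from memory.  If the file was unlinked,
 * all of its items are truncated away in small transactions and the orphan
 * item is removed once everything is gone.
 */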
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr;
	int ret;

	trace_btrfs_inode_evict(inode);

	truncate_inode_pages(&inode->i_data, 0);
	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
			       root == root->fs_info->tree_root))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	if (root->fs_info->log_root_recovering) {
		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
		goto no_delete;
	}

	btrfs_i_size_write(inode, 0);

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		BUG_ON(IS_ERR(trans));
		btrfs_set_trans_block_group(trans, inode);
		trans->block_rsv = root->orphan_block_rsv;

		ret = btrfs_block_rsv_check(trans, root,
					    root->orphan_block_rsv, 0, 5);
		if (ret) {
			BUG_ON(ret != -EAGAIN);
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
			continue;
		}

		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -EAGAIN)
			break;

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);

	}

	if (ret == 0) {
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}

	if (!(root == root->fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
		btrfs_return_ino(root, inode->i_ino);

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
no_delete:
	end_writeback(inode);
	return;
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
				    namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);

	if (!di || IS_ERR(di))
		goto out_err;

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_root *root,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
				  BTRFS_I(dir)->root->root_key.objectid,
				  location->objectid);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(root->fs_info->tree_root, path);

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	if (btrfs_root_refs(&new_root->root_item) == 0) {
		err = -ENOENT;
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}

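/*
 * every root keeps an rb-tree of the btrfs inodes currently in memory,
 * indexed by inode number.  inode_tree_add and inode_tree_del keep that
 * tree up to date as inodes come and go.
 */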
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;

	if (inode_unhashed(inode))
		return;

	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (inode->i_ino < entry->vfs_inode.i_ino)
			p = &parent->rb_left;
		else if (inode->i_ino > entry->vfs_inode.i_ino)
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	/*
	 * Free space cache has inodes in the tree root, but the tree root has a
	 * root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

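/*
 * walk the per-root inode tree in objectid order and drop every cached
 * inode, so a root that has lost its last reference can actually be freed.
 */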
int btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < entry->vfs_inode.i_ino)
			node = node->rb_left;
		else if (objectid > entry->vfs_inode.i_ino)
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= entry->vfs_inode.i_ino) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = entry->vfs_inode.i_ino + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return 0;
}

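/*
 * iget5_locked helpers: initialize a freshly allocated in-memory inode and
 * match an existing one against the (objectid, root) pair we are after.
 */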
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	btrfs_set_inode_space_info(args->root, inode);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
	return args->ino == inode->i_ino &&
		args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, objectid, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}

/* Get an inode object given its location and corresponding root.
 * Returns in *new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
		inode_tree_add(inode);
		unlock_new_inode(inode);
		if (new)
			*new = 1;
	}

	return inode;
}

static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	BTRFS_I(inode)->dummy_inode = 1;

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}

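/*
 * look a name up in a directory and return the matching inode.  When the
 * dir item points at a subvolume root this crosses into the other tree,
 * much like crossing a mount point.
 */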
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	int index;
	int ret;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(dir, dentry, &location);

	if (ret < 0)
		return ERR_PTR(ret);

	if (location.objectid == 0)
		return NULL;

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

	index = srcu_read_lock(&root->fs_info->subvol_srcu);
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
	}
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&root->fs_info->cleanup_work_sem);
		if (ret)
			inode = ERR_PTR(ret);
	}

	return inode;
}

static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;

	if (!dentry->d_inode && !IS_ROOT(dentry))
		dentry = dentry->d_parent;

	if (dentry->d_inode) {
		root = BTRFS_I(dentry->d_inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode;

	inode = btrfs_lookup_dentry(dir, dentry);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return d_splice_alias(inode, dentry);
}

static unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};

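/*
 * readdir is driven by the dir index items in the btree.  "." and ".." are
 * faked at f_pos 0 and 1, everything else comes straight out of the tree
 * starting at the key that matches the current f_pos.
 */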
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];
	char *name_ptr;
	int name_len;

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       1, inode->i_ino,
			       DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
		u64 pino = parent_ino(filp->f_path.dentry);
		over = filldir(dirent, "..", 2,
			       2, pino, DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 2;
	}
	path = btrfs_alloc_path();
	path->reada = 2;

	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
	key.objectid = inode->i_ino;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < filp->f_pos)
			goto next;

		filp->f_pos = found_key.offset;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);

		while (di_cur < di_total) {
			struct btrfs_key location;

			if (verify_dir_item(root, leaf, di))
				break;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len <= sizeof(tmp_name)) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);

			/* is this a reference to our own snapshot? If so
			 * skip it
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset, location.objectid,
				       d_type);

skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
next:
		path->slots[0]++;
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	if (key_type == BTRFS_DIR_INDEX_KEY)
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}

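/*
 * ->write_inode is cheap for btrfs because the inode item is kept up to
 * date by the transaction code.  For WB_SYNC_ALL writeback we still join a
 * transaction and commit it (or just end it in the nolock case used while
 * the filesystem is closing) so the inode really reaches disk.
 */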
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;
	bool nolock = false;

	if (BTRFS_I(inode)->dummy_inode)
		return 0;

	smp_mb();
	nolock = (root->fs_info->closing && root == root->fs_info->tree_root);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		if (nolock)
			trans = btrfs_join_transaction_nolock(root, 1);
		else
			trans = btrfs_join_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_set_trans_block_group(trans, inode);
		if (nolock)
			ret = btrfs_end_transaction_nolock(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
void btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (BTRFS_I(inode)->dummy_inode)
		return;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret == -ENOSPC) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
4420 4421 4422 4423 4424 4425 4426 4427
		if (IS_ERR(trans)) {
			if (printk_ratelimit()) {
				printk(KERN_ERR "btrfs: fail to "
				       "dirty  inode %lu error %ld\n",
				       inode->i_ino, PTR_ERR(trans));
			}
			return;
		}
		btrfs_set_trans_block_group(trans, inode);

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			if (printk_ratelimit()) {
				printk(KERN_ERR "btrfs: fail to "
				       "dirty  inode %lu error %d\n",
				       inode->i_ino, ret);
			}
		}
	}
	btrfs_end_transaction(trans, root);
}

/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = inode->i_ino;
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != inode->i_ino ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_set_inode_index_count(dir);
		if (ret)
			return ret;
	}

	*index = BTRFS_I(dir)->index_cnt;
	BTRFS_I(dir)->index_cnt++;

	return ret;
}

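/*
 * create a new inode: the inode item and the initial inode ref (the
 * backref to the parent directory name) are inserted in a single btree
 * operation, and the new inode is hashed and added to the per-root inode
 * tree before it is returned.
 */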
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     u64 alloc_hint, int mode, u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;
	int owner;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;
	btrfs_set_inode_space_info(root, inode);

	if (mode & S_IFDIR)
		owner = 0;
	else
		owner = 1;
	BTRFS_I(inode)->block_group =
			btrfs_find_block_group(root, 0, alloc_hint, owner);

	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if ((mode & S_IFREG)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);

	return inode;
fail:
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;

	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = inode->i_ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_inode->i_ino,
					 index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root,
					     name, name_len, inode->i_ino,
					     parent_inode->i_ino, index);
	}

	if (ret == 0) {
		ret = btrfs_insert_dir_item(trans, root, name, name_len,
					    parent_inode->i_ino, &key,
					    btrfs_inode_type(inode), index);
		BUG_ON(ret);

		btrfs_i_size_write(parent_inode, parent_inode->i_size +
				   name_len * 2);
		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
		ret = btrfs_update_inode(trans, root, parent_inode);
	}
	return ret;
}

static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
{
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	if (err > 0)
		err = -EEXIST;
	return err;
}

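/*
 * mknod, create and mkdir below all follow the same pattern: start a
 * transaction big enough for the inode item, the dir items and a possible
 * selinux xattr, allocate the inode with btrfs_new_inode() and wire it
 * into the directory.
 */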
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
			int mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	unsigned long nr = 0;
	u64 index = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, mode, &index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		btrfs_update_inode(trans, root, inode);
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}

static int btrfs_create(struct inode *dir, struct dentry *dentry,
			int mode, struct nameidata *nd)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int drop_inode = 0;
	int err;
	unsigned long nr = 0;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, mode, &index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = old_dentry->d_inode;
	u64 index;
	unsigned long nr = 0;
	int err;
	int drop_inode = 0;

	if (inode->i_nlink == 0)
		return -ENOENT;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
		return -EXDEV;

	if (inode->i_nlink == ~0U)
		return -EMLINK;

	err = btrfs_set_inode_index(dir, &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto fail;
	}

	btrfs_inc_nlink(inode);
	inode->i_ctime = CURRENT_TIME;

	btrfs_set_trans_block_group(trans, dir);
	ihold(inode);

	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dget_parent(dentry);
		btrfs_update_inode_block_group(trans, dir);
		err = btrfs_update_inode(trans, root, inode);
		BUG_ON(err);
		btrfs_log_new_name(trans, inode, NULL, parent);
		dput(parent);
	}

	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
	u64 objectid = 0;
	u64 index = 0;
	unsigned long nr = 1;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFDIR | mode,
				&index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	drop_on_err = 1;

	err = btrfs_init_inode_security(trans, inode, dir);
	if (err)
		goto out_fail;

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	btrfs_set_trans_block_group(trans, inode);

	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;

	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);

out_fail:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em);
}

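/*
 * inline extents are stored (possibly compressed) in the btree leaf itself.
 * This copies the item data into a temporary buffer and decompresses it
 * straight into the page; on a short or failed decompress the rest of the
 * page is zeroed.
 */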
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		unsigned long copy_size = min_t(u64,
				  PAGE_CACHE_SIZE - pg_offset,
				  max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	kfree(tmp);
	return 0;
}

/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */

struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 len,
				    int create)
{
	int ret;
	int err = 0;
	u64 bytenr;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = inode->i_ino;
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	int compress_type;

again:
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		BUG_ON(!path);
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compress_type = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
5141
			}
Y
Yan Zheng 已提交
5142 5143 5144
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
5145
		}
Y
Yan Zheng 已提交
5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

Y
Yan Zheng 已提交
5157 5158
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
Y
Yan Zheng 已提交
5159 5160
		em->start = extent_start;
		em->len = extent_end - extent_start;
5161 5162
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
5163 5164
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
5165
			em->block_start = EXTENT_MAP_HOLE;
5166 5167
			goto insert;
		}
5168
		if (compress_type != BTRFS_COMPRESS_NONE) {
C
Chris Mason 已提交
5169
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5170
			em->compress_type = compress_type;
C
Chris Mason 已提交
5171 5172 5173 5174 5175 5176 5177
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
									 item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
Y
Yan Zheng 已提交
5178 5179
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
C
Chris Mason 已提交
5180
		}
5181 5182
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5183
		unsigned long ptr;
5184
		char *map;
5185 5186 5187
		size_t size;
		size_t extent_offset;
		size_t copy_size;
5188

5189
		em->block_start = EXTENT_MAP_INLINE;
C
Chris Mason 已提交
5190
		if (!page || create) {
5191
			em->start = extent_start;
Y
Yan Zheng 已提交
5192
			em->len = extent_end - extent_start;
5193 5194
			goto out;
		}
5195

Y
Yan Zheng 已提交
5196 5197
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
5198
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5199 5200
				size - extent_offset);
		em->start = extent_start + extent_offset;
5201 5202
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
5203
		em->orig_start = EXTENT_MAP_INLINE;
5204
		if (compress_type) {
C
Chris Mason 已提交
5205
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5206 5207
			em->compress_type = compress_type;
		}
5208
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5209
		if (create == 0 && !PageUptodate(page)) {
5210 5211
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
C
Chris Mason 已提交
5212 5213 5214 5215 5216 5217 5218 5219
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret);
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
5220 5221 5222 5223 5224
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
C
Chris Mason 已提交
5225 5226
				kunmap(page);
			}
5227 5228
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
5229
			WARN_ON(1);
5230 5231 5232 5233 5234
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;
				btrfs_release_path(root, path);
5235
				trans = btrfs_join_transaction(root, 1);
5236 5237
				if (IS_ERR(trans))
					return ERR_CAST(trans);
5238 5239
				goto again;
			}
C
Chris Mason 已提交
5240
			map = kmap(page);
5241
			write_extent_buffer(leaf, map + pg_offset, ptr,
5242
					    copy_size);
C
Chris Mason 已提交
5243
			kunmap(page);
5244
			btrfs_mark_buffer_dirty(leaf);
5245
		}
5246
		set_extent_uptodate(io_tree, em->start,
5247
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
5248 5249
		goto insert;
	} else {
C
Chris Mason 已提交
5250
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
5251 5252 5253 5254
		WARN_ON(1);
	}
not_found:
	em->start = start;
5255
	em->len = len;
5256
not_found_em:
5257
	em->block_start = EXTENT_MAP_HOLE;
Y
Yan Zheng 已提交
5258
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5259 5260
insert:
	btrfs_release_path(root, path);
5261
	if (em->start > start || extent_map_end(em) <= start) {
C
Chris Mason 已提交
5262 5263 5264 5265 5266
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
5267 5268 5269
		err = -EIO;
		goto out;
	}
5270 5271

	err = 0;
5272
	write_lock(&em_tree->lock);
5273
	ret = add_extent_mapping(em_tree, em);
5274 5275 5276 5277
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
5278
	if (ret == -EEXIST) {
5279
		struct extent_map *existing;
5280 5281 5282

		ret = 0;

5283
		existing = lookup_extent_mapping(em_tree, start, len);
5284 5285 5286 5287 5288
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
5289 5290 5291 5292 5293
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
5294 5295
							   em, start,
							   root->sectorsize);
5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
5309
			err = 0;
5310 5311
		}
	}
5312
	write_unlock(&em_tree->lock);
5313
out:
5314 5315 5316

	trace_btrfs_get_extent(root, em);

5317 5318
	if (path)
		btrfs_free_path(path);
5319 5320
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
C
Chris Mason 已提交
5321
		if (!err)
5322 5323 5324 5325 5326 5327 5328 5329 5330
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

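/*
 * Fiemap helper: call btrfs_get_extent() and, if it returns a hole,
 * also report any delalloc range hiding behind it so not-yet-written
 * data shows up in fiemap output.
 */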
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to a hole, there might
		 * actually be delalloc bytes behind it
		 */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start, range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map(GFP_NOFS);
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until  the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

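/*
 * Allocate an on-disk extent for a direct I/O write and return an
 * extent_map describing it.  The em passed in (usually a hole from
 * btrfs_get_extent) is reused when it exactly covers the range,
 * otherwise it is dropped and a fresh mapping is inserted.
 */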
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  struct extent_map *em,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;
	bool insert = false;

	/*
	 * Ok if the extent map we looked up is a hole and is for the exact
	 * range we want, there is no reason to allocate a new one, however if
	 * it is not right then we need to free this one and drop the cache for
	 * our range.
	 */
	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
	    em->len != len) {
		free_extent_map(em);
		em = NULL;
		insert = true;
		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
	}

	trans = btrfs_join_transaction(root, 0);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, (u64)-1, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	if (!em) {
		em = alloc_extent_map(GFP_NOFS);
		if (!em) {
			em = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;

	/*
	 * We need to do this because if we're using the original em we searched
	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
	 */
	em->flags = 0;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	while (insert) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}

/*
 * returns 1 when the nocow is safe, < 1 on error, 0 if the
 * block must be cow'd
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != inode->i_ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

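/*
 * get_block callback for direct I/O: map the range starting at @iblock
 * to an existing extent when nocow/prealloc allows it, otherwise
 * allocate a new extent via btrfs_new_extent_direct().
 */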
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1, GFP_NOFS);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = em->len - (start - em->start);
		goto map;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		int ret;
		u64 block_start;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		/*
		 * we're not going to log anything, but we do need
		 * to make sure the current transaction stays open
		 * while we look for nocow cross refs
		 */
		trans = btrfs_join_transaction(root, 0);
		if (IS_ERR(trans))
			goto must_cow;

		if (can_nocow_odirect(trans, inode, start, len) == 1) {
			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			btrfs_end_transaction(trans, root);
			if (ret) {
				free_extent_map(em);
				return ret;
			}
			goto unlock;
		}
		btrfs_end_transaction(trans, root);
	}
must_cow:
	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	em = btrfs_new_extent_direct(inode, em, start, len);
	if (IS_ERR(em))
		return PTR_ERR(em);
	len = min(len, em->len - (start - em->start));
unlock:
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
			  0, NULL, GFP_NOFS);
map:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		set_buffer_new(bh_result);

	free_extent_map(em);

	return 0;
}

struct btrfs_dio_private {
	struct inode *inode;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;
	u32 *csums;
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	struct bio *orig_bio;
};

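/*
 * End-io handler for direct reads: verify the checksum of each bio_vec
 * against the csums saved in the dio_private, then unlock the range and
 * complete the original dio bio.
 */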
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start;
	u32 *private = dip->csums;

	start = dip->logical_offset;
	do {
		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			struct page *page = bvec->bv_page;
			char *kaddr;
			u32 csum = ~(u32)0;
			unsigned long flags;

			local_irq_save(flags);
			kaddr = kmap_atomic(page, KM_IRQ0);
			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
					       csum, bvec->bv_len);
			btrfs_csum_final(csum, (char *)&csum);
			kunmap_atomic(kaddr, KM_IRQ0);
			local_irq_restore(flags);

			flush_dcache_page(bvec->bv_page);
			if (csum != *private) {
				printk(KERN_ERR "btrfs csum failed ino %lu off"
				      " %llu csum %u private %u\n",
				      inode->i_ino, (unsigned long long)start,
				      csum, *private);
				err = -EIO;
			}
		}

		start += bvec->bv_len;
		private++;
		bvec++;
	} while (bvec <= bvec_end);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

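/*
 * End-io handler for direct writes: finish the ordered extents covered
 * by this dio, inserting file extent items (or marking prealloc extents
 * written) and updating the on-disk i_size.
 */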
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
	if (!ret)
		goto out_test;

	BUG_ON(!ordered);

	trans = btrfs_join_transaction(root, 1);
	if (IS_ERR(trans)) {
		err = -ENOMEM;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			ret = btrfs_update_inode(trans, root, inode);
		err = ret;
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state, GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
	if (!ret)
		btrfs_update_inode(trans, root, inode);
	ret = 0;
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	btrfs_end_transaction(trans, root);
	ordered_offset = ordered->file_offset + ordered->len;
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);

out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		goto again;
	}
out_done:
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags, u64 offset)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
	BUG_ON(ret);
	return 0;
}

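/*
 * Per-split-bio end-io for direct I/O: record errors and complete the
 * original dio bio once the last pending bio finishes.
 */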
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
		      "sector %#Lx len %u err no %d\n",
		      dip->inode->i_ino, bio->bi_rw,
		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before atomic variable goto zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	bio_put(bio);
}

static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	int nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}

static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
					 u32 *csums, int async_submit)
{
	int write = rw & REQ_WRITE;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	bio_get(bio);
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	if (ret)
		goto err;

	if (skip_sum)
		goto map;

	if (write && async_submit) {
		ret = btrfs_wq_submit_bio(root->fs_info,
				   inode, rw, bio, 0, 0,
				   file_offset,
				   __btrfs_submit_bio_start_direct_io,
				   __btrfs_submit_bio_done);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else if (!skip_sum) {
		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
					  file_offset, csums);
		if (ret)
			goto err;
	}

map:
	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
	bio_put(bio);
	return ret;
}

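/*
 * Split the original dio bio along the boundaries returned by
 * btrfs_map_block() and submit each piece, tracking them through
 * dip->pending_bios.
 */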
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;
	u64 map_length;
	int nr_pages = 0;
	u32 *csums = dip->csums;
	int ret = 0;
	int async_submit = 0;
	int write = rw & REQ_WRITE;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret)
		return -EIO;

	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	async_submit = 1;
	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

M
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums, async_submit);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			/* Write's use the ordered csums */
			if (!write && !skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages ++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums, async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before atomic variable goto zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}

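/*
 * Entry point used by __blockdev_direct_IO: set up the btrfs_dio_private
 * for this bio and hand it to btrfs_submit_direct_hook().
 */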
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}
	dip->csums = NULL;

6159 6160
	/* Write's use the ordered csum stuff, so we don't need dip->csums */
	if (!write && !skip_sum) {
6161 6162
		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
		if (!dip->csums) {
D
Daniel J Blueman 已提交
6163
			kfree(dip);
6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178
			ret = -ENOMEM;
			goto free_ordered;
		}
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}

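/*
 * Validate alignment of a direct I/O request: the file offset and every
 * iovec must be sector aligned, and reads must not reuse an iov_base.
 */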
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	int seg;
	int i;
	size_t size;
	unsigned long addr;
	unsigned blocksize_mask = root->sectorsize - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
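/*
 * Direct I/O entry point: lock the range, wait out any ordered extents,
 * tag writes as delalloc and let __blockdev_direct_IO drive the I/O via
 * btrfs_get_blocks_direct/btrfs_submit_direct.
 */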
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	ssize_t ret;
	int writing = rw & WRITE;
	int write_bits = 0;
	size_t count = iov_length(iov, nr_segs);

	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
			    offset, nr_segs)) {
		return 0;
	}

	lockstart = offset;
	lockend = offset + count - 1;

	if (writing) {
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	}

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state, GFP_NOFS);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure theres no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}

	/*
	 * we don't use btrfs_set_extent_delalloc because we don't want
	 * the dirty or uptodate bits
	 */
	if (writing) {
		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     EXTENT_DELALLOC, 0, NULL, &cached_state,
				     GFP_NOFS);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
					 lockend, EXTENT_LOCKED | write_bits,
					 1, 0, &cached_state, GFP_NOFS);
			goto out;
		}
	}

	free_extent_state(cached_state);
	cached_state = NULL;

	ret = __blockdev_direct_IO(rw, iocb, inode,
		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
		   btrfs_submit_direct, 0);

	if (ret < 0 && ret != -EIOCBQUEUED) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
		/*
		 * We're falling back to buffered, unlock the section we didn't
		 * do IO on.
		 */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	}
out:
	free_extent_state(cached_state);
	return ret;
}

static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}

int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent);
}

static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}

int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}

static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}

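/*
 * Invalidate a page: wait for writeback, account for any ordered extent
 * that will now never run, and drop the extent state for the range.
 */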
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					   page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 &cached_state, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		cached_state = NULL;
		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
				 GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	u64 page_start;
	u64 page_end;

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
	 * clear any delalloc bits for the range we are fixing to save.  There
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out:
	return ret;
}

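/*
 * Truncate the inode down to its current i_size: zero the partial tail
 * page, add an orphan item so a crash can finish the job, and drop the
 * file extent items past the new size in small transactions.
 */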
static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	int err = 0;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return ret;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, inode);

	ret = btrfs_orphan_add(trans, inode);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	/* Now start a transaction for the truncate */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = root->orphan_block_rsv;

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		if (!trans) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			btrfs_set_trans_block_group(trans, inode);
			trans->block_rsv = root->orphan_block_rsv;
		}

		ret = btrfs_block_rsv_check(trans, root,
					    root->orphan_block_rsv, 0, 5);
		if (ret == -EAGAIN) {
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			trans = NULL;
			continue;
		} else if (ret) {
			err = ret;
			break;
		}

		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN) {
			err = ret;
			break;
		}

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		ret = btrfs_orphan_del(trans, inode);
		if (ret)
			err = ret;
	} else if (ret && inode->i_nlink > 0) {
		/*
		 * Failed to do the truncate, remove us from the in memory
		 * orphan list.
		 */
		ret = btrfs_orphan_del(NULL, inode);
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && !err)
		err = ret;

	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	if (ret && !err)
		err = ret;
	btrfs_btree_balance_dirty(root, nr);

	return err;
}

/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     u64 new_dirid, u64 alloc_hint)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	inode->i_nlink = 1;
	btrfs_i_size_write(inode, 0);

	err = btrfs_update_inode(trans, new_root, inode);
	BUG_ON(err);

	iput(inode);
	return 0;
}

/* helper function for file defrag and space balancing.  This
 * forces readahead on a given range of bytes in an inode
 */
unsigned long btrfs_force_ra(struct address_space *mapping,
			      struct file_ra_state *ra, struct file *file,
			      pgoff_t offset, pgoff_t last_index)
{
	pgoff_t req_size = last_index - offset + 1;

	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
	return offset + req_size;
}

struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->space_info = NULL;
	ei->generation = 0;
	ei->sequence = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->reserved_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->index_cnt = (u64)-1;
	ei->last_unlink_trans = 0;

	atomic_set(&ei->outstanding_extents, 0);
	atomic_set(&ei->reserved_extents, 0);

	ei->ordered_data_close = 0;
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
	extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}

static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
	WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	if (root == root->fs_info->tree_root) {
		struct btrfs_block_group_cache *block_group;

		block_group = btrfs_lookup_block_group(root->fs_info,
						BTRFS_I(inode)->block_group);
		if (block_group && block_group->inode == inode) {
			spin_lock(&block_group->lock);
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			btrfs_put_block_group(block_group);
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
		       inode->i_ino);
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->orphan_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}

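/*
 * evict the inode right away instead of caching it when its root is no
 * longer referenced, otherwise fall back to the generic behaviour.
 */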
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root)
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

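/* tear down the slab caches created by btrfs_init_cachep() */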
void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
}

int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}

static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}

/*
 * If a file is moved, it will inherit the cow and compression flags of the new
 * directory.
 */
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	if (b_dir->flags & BTRFS_INODE_NODATACOW)
		b_inode->flags |= BTRFS_INODE_NODATACOW;
	else
		b_inode->flags &= ~BTRFS_INODE_NODATACOW;

	if (b_dir->flags & BTRFS_INODE_COMPRESS)
		b_inode->flags |= BTRFS_INODE_COMPRESS;
	else
		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}

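/*
 * rename handles both regular inodes and subvolume links.  Subvolume
 * renames force a full log commit; for everything else the log is
 * pinned so a crash leaves the inode at either the old or the new name.
 */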
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;

	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * we're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	btrfs_set_trans_block_group(trans, new_dir);

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_inode->i_ino,
					     new_dir->i_ino, index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size &&
	    old_inode && S_ISREG(old_inode->i_mode)) {
		btrfs_add_ordered_operation(trans, root, old_inode);
	}

	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					old_dentry->d_inode,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	BUG_ON(ret);

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(new_inode->i_ino ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		BUG_ON(ret);
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
	}

	fixup_inode_flags(new_dir, old_inode);

	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	BUG_ON(ret);

	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = dget_parent(new_dentry);
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		dput(parent);
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction_throttle(trans, root);
out_notrans:
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}

/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	      atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}

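/*
 * flush a single inode from the delalloc list.  With sync set, also wait
 * for its ordered extents so the data has actually reached disk.
 */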
int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
				   int sync)
{
	struct btrfs_inode *binode;
	struct inode *inode = NULL;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(&root->fs_info->delalloc_inodes)) {
		binode = list_entry(root->fs_info->delalloc_inodes.next,
				    struct btrfs_inode, delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (inode) {
			list_move_tail(&binode->delalloc_inodes,
				       &root->fs_info->delalloc_inodes);
			break;
		}

		list_del_init(&binode->delalloc_inodes);
		cond_resched_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	if (inode) {
		if (sync) {
			filemap_write_and_wait(inode->i_mapping);
			/*
			 * We have to do this because compression doesn't
			 * actually set PG_writeback until it submits the pages
			 * for IO, which happens in an async thread, so we could
			 * race and not actually wait for any writeback pages
			 * because they've not been submitted yet.  Technically
			 * this could still be the case for the ordered stuff
			 * since the async thread may not have started to do its
			 * work yet.  If this becomes the case then we need to
			 * figure out a way to make sure that in writepage we
			 * wait for any async pages to be submitted before
			 * returning so that fdatawait does what it's supposed to
			 * do.
			 */
			btrfs_wait_ordered_range(inode, 0, (u64)-1);
		} else {
			filemap_flush(inode->i_mapping);
		}
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);
		return 1;
	}
	return 0;
}

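/*
 * symlink targets are stored as an inline file extent holding the path
 * string, so they are limited to BTRFS_MAX_INLINE_DATA_SIZE(root) bytes.
 */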
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

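/*
 * common helper behind btrfs_prealloc_file_range and
 * btrfs_prealloc_file_range_trans: reserve extents covering the range
 * and insert them as BTRFS_FILE_EXTENT_PREALLOC items, updating i_size
 * unless FALLOC_FL_KEEP_SIZE is set.  If no transaction is passed in,
 * one is started and ended per reserved extent.
 */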
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, (u64)-1, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

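/*
 * convenience wrappers around __btrfs_prealloc_file_range: the first
 * starts its own transactions, the second runs inside a transaction
 * supplied by the caller.
 */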
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}

static int btrfs_set_page_dirty(struct page *page)
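/* btrfs does not attach buffer heads to data pages, so the nobuffers
 * variant of set_page_dirty is sufficient.
 */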
{
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_readonly(root) && (mask & MAY_WRITE))
		return -EROFS;
	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, flags, btrfs_check_acl);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release        = btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr      = btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};