/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"

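/*
 * lookup key for finding or allocating an in-core inode: the inode
 * number and the root it belongs to.
 */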
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

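/*
 * initialize the security attributes of a newly created inode: inherit
 * any ACLs from the parent directory and set up the security xattr.
 */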
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode,  struct inode *dir)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	return 0;
fail:
	btrfs_free_path(path);
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	BUG_ON(ret);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

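/*
 * each range produced by compress_file_range is queued as one
 * async_extent; an async_cow describes one chunk of a delalloc range
 * that has been handed to the ordered work queues.
 */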
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

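/*
 * queue one compressed (or uncompressed fallback) range on the
 * async_cow list so the submit step can allocate and write it later.
 */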
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize,  num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		BUG_ON(!pages);

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		btrfs_set_trans_block_group(trans, inode);
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so that the allocator
		 * does sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		btrfs_end_transaction(trans, root);

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map(GFP_NOFS);
		BUG_ON(!em);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}

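/*
 * pick a starting hint for the allocator: prefer the block number of an
 * existing mapping near this range, falling back to the first real
 * mapping in the file if that block number is bogus.
 */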
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(root == root->fs_info->tree_root);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize,  num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		BUG_ON(!em);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue call back to started compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

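/*
 * kick off async delalloc for a range: the work is split into chunks of
 * at most 512k, each queued to the delalloc workers, and throttled
 * against the global count of async delalloc pages.
 */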
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

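/*
 * helper for run_delalloc_nocow: return 1 if any checksums exist for
 * the given byte range, which forces that range to be COWed so the
 * csums stay consistent with the data they describe.
 */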
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * when the nocow writeback path is called.  This checks for snapshots or COW
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock = false;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	if (root == root->fs_info->tree_root) {
		nolock = true;
		trans = btrfs_join_transaction_nolock(root, 1);
	} else {
		trans = btrfs_join_transaction(root, 1);
	}
	BUG_ON(IS_ERR(trans));

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that the csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			BUG_ON(!em);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			BUG_ON(ret);
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	if (nolock) {
		ret = btrfs_end_transaction_nolock(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress) &&
		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

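/*
 * extent_io.c split_extent_hook, called when a delalloc extent state is
 * split so the count of outstanding extents stays accurate for
 * metadata reservations.
 */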
static int btrfs_split_extent_hook(struct inode *inode,
				   struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return 0;

	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
	return 0;
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static int btrfs_merge_extent_hook(struct inode *inode,
				   struct extent_state *new,
				   struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return 0;

	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
	return 0;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode,
			      struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		int do_list = (root->root_key.objectid !=
			       BTRFS_ROOT_TREE_OBJECTID);

		if (*bits & EXTENT_FIRST_DELALLOC)
			*bits &= ~EXTENT_FIRST_DELALLOC;
		else
			atomic_inc(&BTRFS_I(inode)->outstanding_extents);

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode,
				struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		int do_list = (root->root_key.objectid !=
			       BTRFS_ROOT_TREE_OBJECTID);

		if (*bits & EXTENT_FIRST_DELALLOC)
			*bits &= ~EXTENT_FIRST_DELALLOC;
		else if (!(*bits & EXTENT_DO_ACCOUNTING))
			atomic_dec(&BTRFS_I(inode)->outstanding_extents);

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return ret;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (root == root->fs_info->tree_root)
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
	else
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & REQ_WRITE)) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

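/*
 * mark a range in the inode's io_tree as delalloc so the set_bit and
 * clear_bit accounting hooks above fire for it during writeback.
 */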
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state, GFP_NOFS);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	BUG();
	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}

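/*
 * insert the file extent item for a reserved extent that just finished
 * IO: drop anything overlapping in the file range, write the new
 * extent item and record the extent backref for it.
 */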
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					inode->i_ino, file_pos, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);

	return 0;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	int compress_type = 0;
	int ret;
	bool nolock = false;

	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					     end - start + 1);
	if (!ret)
		return 0;
	BUG_ON(!ordered_extent);

	nolock = (root == root->fs_info->tree_root);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list));
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			if (nolock)
				trans = btrfs_join_transaction_nolock(root, 1);
			else
				trans = btrfs_join_transaction(root, 1);
			BUG_ON(IS_ERR(trans));
			btrfs_set_trans_block_group(trans, inode);
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode(trans, root, inode);
			BUG_ON(ret);
		}
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state, GFP_NOFS);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root, 1);
	else
		trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);

	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	if (!ret) {
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
	}
	ret = 0;
out:
	if (nolock) {
		if (trans)
			btrfs_end_transaction_nolock(trans, root);
	} else {
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
		if (trans)
			btrfs_end_transaction(trans, root);
	}

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
}
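
/*
 * Summary of the completion paths above: NOCOW ordered extents only need
 * the disk i_size and inode item updated; preallocated extents are flipped
 * to regular extents with btrfs_mark_extent_written(); everything else gets
 * a fresh file extent item via insert_reserved_file_extent() before the
 * pending checksums are added and the inode is updated.
 */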

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&failrec->bio_flags,
						 em->compress_type);
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				 (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & REQ_WRITE)
		rw = WRITE;
	else
		rw = READ;

	ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						      failrec->last_mirror,
						      failrec->bio_flags, 0);
	return ret;
}
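
/*
 * The retry logic above works per page: the first failure for a range
 * allocates an io_failure_record and stashes it in the failure tree, and
 * each subsequent failure bumps last_mirror and resubmits a one-page bio
 * against the next copy.  Once last_mirror exceeds btrfs_num_copies() the
 * record is torn down and -EIO is passed back to the original end_io path.
 */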

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	if (printk_ratelimit()) {
		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
		       "private %llu\n", page->mapping->host->i_ino,
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	}
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	if (private == 0)
		return 0;
	return -EIO;
}

struct delayed_iput {
	struct list_head list;
	struct inode *inode;
};

void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}

void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}
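
/*
 * The delayed iput machinery above lets a caller that might be holding
 * locks needed by inode eviction drop its reference safely:
 * btrfs_add_delayed_iput() only queues the inode when it would drop the
 * last reference (atomic_add_unless() handles the common case), and
 * btrfs_run_delayed_iputs() performs the real iput() later from a context
 * where blocking on eviction is harmless.
 */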

/*
 * calculate extra metadata reservation when snapshotting a subvolume
 * that contains orphan files.
 */
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending,
				u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;

	root = pending->root;
	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	block_rsv = root->orphan_block_rsv;

	/* orphan block reservation for the snapshot */
	num_bytes = block_rsv->size;

	/*
	 * after the snapshot is created, COWing tree blocks may use more
	 * space than it frees. So we should make sure there is enough
	 * reserved space.
	 */
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes += block_rsv->size -
			     (block_rsv->reserved + block_rsv->freed[index]);
	}

	*bytes_to_reserve += num_bytes;
}

void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *snap = pending->snap;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;
	int ret;

	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	/* refill source subvolume's orphan block reservation */
	block_rsv = root->orphan_block_rsv;
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes = block_rsv->size -
			    (block_rsv->reserved + block_rsv->freed[index]);
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      root->orphan_block_rsv,
					      num_bytes);
		BUG_ON(ret);
	}

	/* setup orphan block reservation for the snapshot */
	block_rsv = btrfs_alloc_block_rsv(snap);
	BUG_ON(!block_rsv);

	btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
	snap->orphan_block_rsv = block_rsv;

	num_bytes = root->orphan_block_rsv->size;
	ret = btrfs_block_rsv_migrate(&pending->block_rsv,
				      block_rsv, num_bytes);
	BUG_ON(ret);

#if 0
	/* insert orphan item for the snapshot */
	WARN_ON(!root->orphan_item_inserted);
	ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
				       snap->root_key.objectid);
	BUG_ON(ret);
	snap->orphan_item_inserted = 1;
#endif
}

enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};

/*
 * This is called at transaction commit time. If there are no orphan
 * files in the subvolume, it removes the orphan item and frees the
 * block_rsv structure.
 */
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	int ret;

	if (!list_empty(&root->orphan_list) ||
	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	if (root->orphan_item_inserted &&
	    btrfs_root_refs(&root->root_item) > 0) {
		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
					    root->root_key.objectid);
		BUG_ON(ret);
		root->orphan_item_inserted = 0;
	}

	if (root->orphan_block_rsv) {
		WARN_ON(root->orphan_block_rsv->size > 0);
		btrfs_free_block_rsv(root, root->orphan_block_rsv);
		root->orphan_block_rsv = NULL;
	}
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 *
 * NOTE: caller of this function should reserve 5 units of metadata for
 *	 this function.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;

	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(root);
		BUG_ON(!block_rsv);
	}

	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		btrfs_free_block_rsv(root, block_rsv);
		block_rsv = NULL;
	}

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
#endif
		insert = 1;
	}

	if (!BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 1;
		reserve = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (block_rsv)
		btrfs_add_durable_block_rsv(root->fs_info, block_rsv);

	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		BUG_ON(ret);
	}

	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
		BUG_ON(ret);
	}

	/* insert an orphan item to track that the subvolume contains orphan files */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
					       root->root_key.objectid);
		BUG_ON(ret);
	}
	return 0;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int delete_item = 0;
	int release_rsv = 0;
	int ret = 0;

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_del_init(&BTRFS_I(inode)->i_orphan);
		delete_item = 1;
	}

	if (BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 0;
		release_rsv = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (trans && delete_item) {
		ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
		BUG_ON(ret);
	}

	if (release_rsv)
		btrfs_orphan_release_metadata(inode);

	return 0;
}
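
/*
 * Orphan item life cycle: btrfs_orphan_add() is called before an unlink or
 * truncate starts, btrfs_orphan_del() once the operation has completed, and
 * anything still recorded from an earlier mount is finished off by
 * btrfs_orphan_cleanup() below.
 */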

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			goto out;
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->orphan_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->orphan_lock);

		/*
		 * if this is a bad inode, means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, let's do that */
		if (inode->i_nlink) {
			if (!S_ISREG(inode->i_mode)) {
				WARN_ON(1);
				iput(inode);
				continue;
			}
			nr_truncate++;
			ret = btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
		if (ret)
			goto out;
	}
	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
		btrfs_block_rsv_release(root, root->orphan_block_rsv,
					(u64)-1);

	if (root->orphan_block_rsv || root->orphan_item_inserted) {
		trans = btrfs_join_transaction(root, 1);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans, root);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

out:
	if (ret)
		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
	btrfs_free_path(path);
	return ret;
}

/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	int scanned = 0;

	slot++;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
			return 1;

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	return 1;
}

/*
 * read an inode from the btree into the in-memory inode
 */
static void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	int maybe_acls;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);

	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
	if (!maybe_acls)
		cache_no_acl(inode);

	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
						alloc_group_block, 0);
	btrfs_free_path(path);
	inode_item = NULL;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	if (!leaf->map_token)
		map_private_extent_buffer(leaf, (unsigned long)item,
					  sizeof(struct btrfs_inode_item),
					  &leaf->map_token, &leaf->kaddr,
					  &leaf->map_start, &leaf->map_len,
					  KM_USER1);

	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}


/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				    name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  inode->i_ino,
				  dir->i_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       inode->i_ino, dir->i_ino);
		goto err;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir->i_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
out:
	return ret;
}

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret;
	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		btrfs_drop_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	}
	return ret;
}
		

/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
			     struct btrfs_path *path)
{
	struct extent_buffer *eb;
	int level;
	u64 refs = 1;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		int ret;

		if (!path->nodes[level])
			break;
		eb = path->nodes[level];
		if (!btrfs_block_can_be_shared(root, eb))
			continue;
		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
					       &refs, NULL);
		if (refs > 1)
			return 1;
	}
	return 0;
}
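
/*
 * check_path_shared() is what __unlink_start_trans() below relies on: if
 * every node on the path to the items being deleted has a reference count
 * of one, the unlink is guaranteed to free metadata space, so it may be
 * allowed to dip into the global reservation when the fs is out of space.
 */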

/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space.
 * so in enospc case, we should make sure they will free space before
 * allowing them to use the global metadata reservation.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
						       struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *inode = dentry->d_inode;
	u64 index;
	int check_link = 1;
	int err = -ENOSPC;
	int ret;

	trans = btrfs_start_transaction(root, 10);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return ERR_PTR(-ENOSPC);

	/* check if someone else holds a reference */
	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
		return ERR_PTR(-ENOSPC);

	if (atomic_read(&inode->i_count) > 2)
		return ERR_PTR(-ENOSPC);

	if (xchg(&root->fs_info->enospc_unlink, 1))
		return ERR_PTR(-ENOSPC);

	path = btrfs_alloc_path();
	if (!path) {
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(-ENOMEM);
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		root->fs_info->enospc_unlink = 0;
		return trans;
	}

	path->skip_locking = 1;
	path->search_commit_root = 1;

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(dir)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(root, path);

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(inode)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(root, path);

	if (ret == 0 && S_ISREG(inode->i_mode)) {
		ret = btrfs_lookup_file_extent(trans, root, path,
					       inode->i_ino, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		BUG_ON(ret == 0);
		if (check_path_shared(root, path))
			goto out;
		btrfs_release_path(root, path);
	}

	if (!check_link) {
		err = 0;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	if (di) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		err = 0;
		goto out;
	}
	btrfs_release_path(root, path);

	ref = btrfs_lookup_inode_ref(trans, root, path,
				dentry->d_name.name, dentry->d_name.len,
				inode->i_ino, dir->i_ino, 0);
	if (IS_ERR(ref)) {
		err = PTR_ERR(ref);
		goto out;
	}
	BUG_ON(!ref);
	if (check_path_shared(root, path))
		goto out;
	index = btrfs_inode_ref_index(path->nodes[0], ref);
	btrfs_release_path(root, path);

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	BUG_ON(ret == -ENOENT);
	if (check_path_shared(root, path))
		goto out;

	err = 0;
out:
	btrfs_free_path(path);
	if (err) {
		btrfs_end_transaction(trans, root);
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(err);
	}

	trans->block_rsv = &root->fs_info->global_block_rsv;
	return trans;
}

static void __unlink_end_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
		BUG_ON(!root->fs_info->enospc_unlink);
		root->fs_info->enospc_unlink = 0;
	}
	btrfs_end_transaction_throttle(trans, root);
}
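
/*
 * Only one task at a time gets to run an unlink/rmdir against the global
 * reservation: __unlink_start_trans() claims fs_info->enospc_unlink with
 * xchg() and __unlink_end_trans() clears it once the transaction that used
 * the global block reservation has ended.
 */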

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, inode);
		BUG_ON(ret);
	}

	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}

int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	BUG_ON(!di || IS_ERR(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir->i_ino, &index, name, name_len);
	if (ret < 0) {
		BUG_ON(ret != -ENOENT);
		di = btrfs_search_dir_index_item(root, path, dir->i_ino,
						 name, name_len);
		BUG_ON(!di || IS_ERR(di));

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(root, path);
		index = key.offset;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	BUG_ON(!di || IS_ERR(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		return -ENOTEMPTY;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto out;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
out:
	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	return err;
}

#if 0
/*
 * when truncating bytes in a file, it is possible to avoid reading
 * the leaves that contain only checksum items.  This can be the
 * majority of the IO required to delete a large file, but it must
 * be done carefully.
 *
 * The keys in the level just above the leaves are checked to make sure
 * the lowest key in a given leaf is a csum key, and starts at an offset
 * after the new  size.
 *
 * Then the key for the next leaf is checked to make sure it also has
 * a checksum item for the same file.  If it does, we know our target leaf
 * contains only checksum items, and it can be safely freed without reading
 * it.
 *
 * This is just an optimization targeted at large files.  It may do
 * nothing.  It will return 0 unless things went badly.
 */
static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct inode *inode, u64 new_size)
{
	struct btrfs_key key;
	int ret;
	int nritems;
	struct btrfs_key found_key;
	struct btrfs_key other_key;
	struct btrfs_leaf_ref *ref;
	u64 leaf_gen;
	u64 leaf_start;

	path->lowest_level = 1;
	key.objectid = inode->i_ino;
	key.type = BTRFS_CSUM_ITEM_KEY;
	key.offset = new_size;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (path->nodes[1] == NULL) {
		ret = 0;
		goto out;
	}
	ret = 0;
	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
	nritems = btrfs_header_nritems(path->nodes[1]);

	if (!nritems)
		goto out;

	if (path->slots[1] >= nritems)
		goto next_node;

	/* did we find a key greater than anything we want to delete? */
	if (found_key.objectid > inode->i_ino ||
	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
		goto out;

	/* we check the next key in the node to make sure the leaf contains
	 * only checksum items.  This comparison doesn't work if our
	 * leaf is the last one in the node
	 */
	if (path->slots[1] + 1 >= nritems) {
next_node:
		/* search forward from the last key in the node, this
		 * will bring us into the next node in the tree
		 */
		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);

		/* unlikely, but we inc below, so check to be safe */
		if (found_key.offset == (u64)-1)
			goto out;

		/* search_forward needs a path with locks held, do the
		 * search again for the original key.  It is possible
		 * this will race with a balance and return a path that
		 * we could modify, but this drop is just an optimization
		 * and is allowed to miss some leaves.
		 */
		btrfs_release_path(root, path);
		found_key.offset++;

		/* setup a max key for search_forward */
		other_key.offset = (u64)-1;
		other_key.type = key.type;
		other_key.objectid = key.objectid;

		path->keep_locks = 1;
		ret = btrfs_search_forward(root, &found_key, &other_key,
					   path, 0, 0);
		path->keep_locks = 0;
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		key.offset = found_key.offset;
		btrfs_release_path(root, path);
		cond_resched();
		goto again;
	}

	/* we know there's one more slot after us in the tree,
	 * read that key so we can verify it is also a checksum item
	 */
	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

	if (found_key.objectid < inode->i_ino)
		goto next_key;

	if (found_key.type != key.type || found_key.offset < new_size)
		goto next_key;

	/*
	 * if the key for the next leaf isn't a csum key from this objectid,
	 * we can't be sure there aren't good items inside this leaf.
	 * Bail out
	 */
	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
		goto out;

	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
	/*
	 * it is safe to delete this leaf, it contains only
	 * csum items from this inode at an offset >= new_size
	 */
	ret = btrfs_del_leaf(trans, root, path, leaf_start);
	BUG_ON(ret);

	if (root->ref_cows && leaf_gen < trans->transid) {
		ref = btrfs_alloc_leaf_ref(root, 0);
		if (ref) {
			ref->root_gen = root->root_key.offset;
			ref->bytenr = leaf_start;
			ref->owner = 0;
			ref->generation = leaf_gen;
			ref->nritems = 0;

			btrfs_sort_leaf_ref(ref);

			ret = btrfs_add_leaf_ref(root, ref, 0);
			WARN_ON(ret);
			btrfs_free_leaf_ref(root, ref);
		} else {
			WARN_ON(1);
		}
	}
next_key:
	btrfs_release_path(root, path);

	if (other_key.objectid == inode->i_ino &&
	    other_key.type == key.type && other_key.offset > key.offset) {
		key.offset = other_key.offset;
		cond_resched();
		goto again;
	}
	ret = 0;
out:
	/* fixup any changes we've made to the path */
	path->lowest_level = 0;
	path->keep_locks = 0;
	btrfs_release_path(root, path);
	return ret;
}

#endif

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
C
Chris Mason 已提交
3269
 * any higher than new_size
C
Chris Mason 已提交
3270 3271 3272
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
3273 3274 3275
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
C
Chris Mason 已提交
3276
 */
3277 3278 3279 3280
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
C
Chris Mason 已提交
3281 3282
{
	struct btrfs_path *path;
3283
	struct extent_buffer *leaf;
C
Chris Mason 已提交
3284
	struct btrfs_file_extent_item *fi;
3285 3286
	struct btrfs_key key;
	struct btrfs_key found_key;
C
Chris Mason 已提交
3287
	u64 extent_start = 0;
3288
	u64 extent_num_bytes = 0;
3289
	u64 extent_offset = 0;
C
Chris Mason 已提交
3290
	u64 item_end = 0;
3291 3292
	u64 mask = root->sectorsize - 1;
	u32 found_type = (u8)-1;
C
Chris Mason 已提交
3293 3294
	int found_extent;
	int del_item;
3295 3296
	int pending_del_nr = 0;
	int pending_del_slot = 0;
3297
	int extent_type = -1;
3298
	int encoding;
3299 3300 3301 3302
	int ret;
	int err = 0;

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
C
Chris Mason 已提交
3303

3304
	if (root->ref_cows || root == root->fs_info->tree_root)
3305
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3306

C
Chris Mason 已提交
3307 3308
	path = btrfs_alloc_path();
	BUG_ON(!path);
J
Julia Lawall 已提交
3309
	path->reada = -1;
3310

C
Chris Mason 已提交
3311 3312
	key.objectid = inode->i_ino;
	key.offset = (u64)-1;
3313 3314
	key.type = (u8)-1;

3315
search_again:
3316
	path->leave_spinning = 1;
3317
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3318 3319 3320 3321
	if (ret < 0) {
		err = ret;
		goto out;
	}
C
Chris Mason 已提交
3322

3323
	if (ret > 0) {
3324 3325 3326
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
3327 3328
		if (path->slots[0] == 0)
			goto out;
3329 3330 3331
		path->slots[0]--;
	}

C
Chris Mason 已提交
3332
	while (1) {
C
Chris Mason 已提交
3333
		fi = NULL;
3334 3335 3336
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);
		encoding = 0;

		if (found_key.objectid != inode->i_ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			encoding = btrfs_file_extent_compression(leaf, fi);
			encoding |= btrfs_file_extent_encryption(leaf, fi);
			encoding |= btrfs_file_extent_other_encoding(leaf, fi);

			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item && !encoding) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = new_size -
					found_key.offset + root->sectorsize - 1;
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				ret = btrfs_truncate_item(trans, root, path,
							  size, 1);
				BUG_ON(ret);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
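		/*
		 * Deletions are batched: runs of contiguous leaf slots are
		 * accumulated in pending_del_slot/pending_del_nr and removed
		 * with a single btrfs_del_items() call once the run breaks.
		 */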
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						inode->i_ino, extent_offset);
			BUG_ON(ret);
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			if (root->ref_cows) {
				err = -EAGAIN;
				goto out;
			}
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				BUG_ON(ret);
				pending_del_nr = 0;
			}
			btrfs_release_path(root, path);
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return err;
}

/*
 * taken from block_truncate_page, but does cow as it zeros out
 * any bytes left in the last page in the file.
 */
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	int ret = 0;
	u64 page_start;
	u64 page_end;

	if ((offset & (blocksize - 1)) == 0)
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

	ret = -ENOMEM;
again:
	page = grab_cache_page(mapping, index);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

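	/*
	 * Clear any stale dirty/delalloc state on the range, then re-mark it
	 * delalloc so the zeroed tail of the block is written back through
	 * the normal COW path.
	 */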
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	ret = 0;
	if (offset != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 mask = root->sectorsize - 1;
	u64 hole_start = (oldsize + mask) & ~mask;
	u64 block_end = (size + mask) & ~mask;
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				block_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			u64 hint_byte = 0;
			hole_size = last_byte - cur_offset;

			trans = btrfs_start_transaction(root, 2);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				break;
			}
			btrfs_set_trans_block_group(trans, inode);

			err = btrfs_drop_extents(trans, inode, cur_offset,
						 cur_offset + hole_size,
						 &hint_byte, 1);
			if (err)
				break;

			err = btrfs_insert_file_extent(trans, root,
					inode->i_ino, cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			if (err)
				break;

			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);

			btrfs_end_transaction(trans, root);
		}
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}

	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}

static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);
	int ret;

	if (newsize == oldsize)
		return 0;

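	/*
	 * Growing the file only needs the new i_size plus hole extents filled
	 * in by btrfs_cont_expand(); shrinking goes through btrfs_truncate()
	 * to drop the items beyond the new size.
	 */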
	if (newsize > oldsize) {
		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		truncate_pagecache(inode, oldsize, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret) {
			btrfs_setsize(inode, oldsize);
			return ret;
		}

		mark_inode_dirty(inode);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			BTRFS_I(inode)->ordered_data_close = 1;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);
		ret = btrfs_truncate(inode);
	}

	return ret;
}

static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr->ia_size);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);

		if (attr->ia_valid & ATTR_MODE)
			err = btrfs_acl_chmod(inode);
	}

	return err;
}

void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
3752
	unsigned long nr;
	int ret;

3755 3756
	trace_btrfs_inode_evict(inode);

	truncate_inode_pages(&inode->i_data, 0);
3758 3759
	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
			       root == root->fs_info->tree_root))
		goto no_delete;

	if (is_bad_inode(inode)) {
3763
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3768

3769 3770 3771 3772 3773
	if (root->fs_info->log_root_recovering) {
		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
		goto no_delete;
	}

3774 3775 3776 3777 3778
	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
		goto no_delete;
	}

	btrfs_i_size_write(inode, 0);

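	/*
	 * Truncate the inode's items in bounded passes: each pass makes sure
	 * the orphan block reservation is topped up (committing if needed),
	 * and btrfs_truncate_inode_items() returns -EAGAIN when it has to
	 * restart with a fresh transaction.
	 */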
	while (1) {
3782 3783
		trans = btrfs_start_transaction(root, 0);
		BUG_ON(IS_ERR(trans));
3784
		btrfs_set_trans_block_group(trans, inode);
3785 3786 3787 3788 3789 3790 3791 3792 3793 3794
		trans->block_rsv = root->orphan_block_rsv;

		ret = btrfs_block_rsv_check(trans, root,
					    root->orphan_block_rsv, 0, 5);
		if (ret) {
			BUG_ON(ret != -EAGAIN);
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
			continue;
		}
3795

3796
		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3797 3798
		if (ret != -EAGAIN)
			break;
3799

3800 3801 3802 3803
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
3804

3805
	}
3806

3807 3808 3809 3810
	if (ret == 0) {
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}
3811

3812
	nr = trans->blocks_used;
3813
	btrfs_end_transaction(trans, root);
3814
	btrfs_btree_balance_dirty(root, nr);
no_delete:
	end_writeback(inode);
3817
	return;
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);
3836

	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
				    namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);

	if (!di || IS_ERR(di))
3843
		goto out_err;

3845
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
3849 3850 3851
out_err:
	location->objectid = 0;
	goto out;
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_root *root,
3860 3861 3862 3863
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
3865 3866 3867 3868 3869 3870
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;

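	/*
	 * Find the ROOT_REF item that links this directory to the subvolume,
	 * verify it really matches the dentry name, then read the subvolume's
	 * root and point the location at its root directory inode.
	 */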
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

3878 3879 3880 3881 3882 3883 3884 3885 3886
	err = -ENOENT;
	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
				  BTRFS_I(dir)->root->root_key.objectid,
				  location->objectid);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

3888 3889 3890 3891 3892
	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(root->fs_info->tree_root, path);

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	if (btrfs_root_refs(&new_root->root_item) == 0) {
		err = -ENOENT;
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}

3923 3924 3925 3926
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
3927 3928 3929 3930 3931
	struct rb_node **p;
	struct rb_node *parent;
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;
3932

	if (inode_unhashed(inode))
3934 3935
		return;

3936 3937 3938 3939 3940 3941
	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (inode->i_ino < entry->vfs_inode.i_ino)
3942
			p = &parent->rb_left;
3943
		else if (inode->i_ino > entry->vfs_inode.i_ino)
3944
			p = &parent->rb_right;
3945 3946
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
3948 3949 3950 3951
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
3962
	int empty = 0;
3963

3964
	spin_lock(&root->inode_lock);
3965 3966 3967
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3968
		empty = RB_EMPTY_ROOT(&root->inode_tree);
3969
	}
3970
	spin_unlock(&root->inode_lock);
3971

3972 3973 3974 3975 3976 3977 3978 3979
	/*
	 * Free space cache has inodes in the tree root, but the tree root has a
	 * root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

int btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	WARN_ON(btrfs_root_refs(&root->root_item) != 0);

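	/*
	 * Walk the per-root rb-tree of in-memory inodes in objectid order,
	 * pruning dentry aliases and dropping each reference so the dead
	 * root's inodes can be evicted.
	 */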
	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < entry->vfs_inode.i_ino)
			node = node->rb_left;
		else if (objectid > entry->vfs_inode.i_ino)
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= entry->vfs_inode.i_ino) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = entry->vfs_inode.i_ino + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
4033
			 * btrfs_drop_inode will have it removed from
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return 0;
4050 4051
}

static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	btrfs_set_inode_space_info(args->root, inode);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
	return args->ino == inode->i_ino &&
		args->root == BTRFS_I(inode)->root;
}

4068 4069 4070
static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, objectid, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}

/* Get an inode object given its location and corresponding root.
 * Returns in *is_new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4087
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
4093
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
4099
		inode_tree_add(inode);
		unlock_new_inode(inode);
4101 4102
		if (new)
			*new = 1;
	}

	return inode;
}

static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	BTRFS_I(inode)->dummy_inode = 1;

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}

4130
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;
4133
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
4136
	int index;
4137
	int ret;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);
4141

	ret = btrfs_inode_by_name(dir, dentry, &location);
4143

	if (ret < 0)
		return ERR_PTR(ret);
4146

4147 4148 4149 4150
	if (location.objectid == 0)
		return NULL;

	if (location.type == BTRFS_INODE_ITEM_KEY) {
4151
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4152 4153 4154 4155 4156
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

4157
	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4158 4159 4160 4161 4162 4163 4164 4165
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
4166
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
	}
4168 4169
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

4170
	if (!IS_ERR(inode) && root != sub_root) {
4171 4172
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
4173
			ret = btrfs_orphan_cleanup(sub_root);
4174
		up_read(&root->fs_info->cleanup_work_sem);
4175 4176
		if (ret)
			inode = ERR_PTR(ret);
4177 4178
	}

4179 4180 4181
	return inode;
}

static int btrfs_dentry_delete(const struct dentry *dentry)
4183 4184 4185
{
	struct btrfs_root *root;

4186 4187
	if (!dentry->d_inode && !IS_ROOT(dentry))
		dentry = dentry->d_parent;
4188

4189 4190 4191 4192 4193
	if (dentry->d_inode) {
		root = BTRFS_I(dentry->d_inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;
	}
4194 4195 4196
	return 0;
}

4197 4198 4199 4200 4201 4202 4203 4204
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode;

	inode = btrfs_lookup_dentry(dir, dentry);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
4205

	return d_splice_alias(inode, dentry);
}

static unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};

4213 4214
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
{
4216
	struct inode *inode = filp->f_dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
4221
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int ret;
	u32 nritems;
4225
	struct extent_buffer *leaf;
	int slot;
	int advance;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
4234 4235 4236
	char tmp_name[32];
	char *name_ptr;
	int name_len;

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;
4241

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       1, inode->i_ino,
			       DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
4253
		u64 pino = parent_ino(filp->f_path.dentry);
4254
		over = filldir(dirent, "..", 2,
4255
			       2, pino, DT_DIR);
4256
		if (over)
4257
			return 0;
4258 4259
		filp->f_pos = 2;
	}
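	/*
	 * Everything past "." and ".." comes straight from the DIR_INDEX
	 * (or DIR_ITEM for the tree root) items, so f_pos is simply the next
	 * directory index to return.
	 */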
	path = btrfs_alloc_path();
	path->reada = 2;

	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
4265
	key.objectid = inode->i_ino;
4266

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;
	advance = 0;
4271 4272

	while (1) {
4273 4274
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		slot = path->slots[0];
		if (advance || slot >= nritems) {
4277
			if (slot >= nritems - 1) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
4281 4282
				leaf = path->nodes[0];
				nritems = btrfs_header_nritems(leaf);
				slot = path->slots[0];
			} else {
				slot++;
				path->slots[0]++;
			}
		}
4289

		advance = 1;
4291 4292 4293 4294
		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
4296
		if (btrfs_key_type(&found_key) != key_type)
			break;
4298
		if (found_key.offset < filp->f_pos)
			continue;
4300 4301

		filp->f_pos = found_key.offset;
4302

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
4305
		di_total = btrfs_item_size(leaf, item);
4306 4307

		while (di_cur < di_total) {
4308 4309
			struct btrfs_key location;

4310 4311 4312
			if (verify_dir_item(root, leaf, di))
				break;

4313
			name_len = btrfs_dir_name_len(leaf, di);
4314
			if (name_len <= sizeof(tmp_name)) {
4315 4316 4317
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
4318 4319 4320 4321
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
4322 4323 4324 4325 4326 4327
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4328 4329 4330 4331 4332 4333 4334 4335 4336

			/* is this a reference to our own snapshot? If so
			 * skip it
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
4337
			over = filldir(dirent, name_ptr, name_len,
4338
				       found_key.offset, location.objectid,
				       d_type);
4340

4341
skip:
4342 4343 4344
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
4348
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
	}
4353 4354

	/* Reached end of directory/root. Bump pos past the last item. */
4355
	if (key_type == BTRFS_DIR_INDEX_KEY)
4356 4357 4358 4359 4360
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
4361 4362
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}

4370
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;
4375
	bool nolock = false;

4377
	if (BTRFS_I(inode)->dummy_inode)
4378 4379
		return 0;

4380 4381 4382
	smp_mb();
	nolock = (root->fs_info->closing && root == root->fs_info->tree_root);

4383
	if (wbc->sync_mode == WB_SYNC_ALL) {
4384 4385 4386 4387
		if (nolock)
			trans = btrfs_join_transaction_nolock(root, 1);
		else
			trans = btrfs_join_transaction(root, 1);
4388 4389
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_set_trans_block_group(trans, inode);
4391 4392 4393 4394
		if (nolock)
			ret = btrfs_end_transaction_nolock(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	return ret;
}

/*
4400
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
void btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
4409 4410 4411 4412
	int ret;

	if (BTRFS_I(inode)->dummy_inode)
		return;

4414
	trans = btrfs_join_transaction(root, 1);
4415
	BUG_ON(IS_ERR(trans));
	btrfs_set_trans_block_group(trans, inode);
4417 4418

	ret = btrfs_update_inode(trans, root, inode);
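	/*
	 * A cached join-transaction may not have reserved space for this
	 * update; on -ENOSPC fall back to a full btrfs_start_transaction(),
	 * which does its own reservation.
	 */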
	if (ret && ret == -ENOSPC) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
4423 4424 4425 4426 4427 4428 4429 4430
		if (IS_ERR(trans)) {
			if (printk_ratelimit()) {
				printk(KERN_ERR "btrfs: fail to "
				       "dirty  inode %lu error %ld\n",
				       inode->i_ino, PTR_ERR(trans));
			}
			return;
		}
4431
		btrfs_set_trans_block_group(trans, inode);
4432

4433 4434
		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
4435 4436 4437 4438 4439
			if (printk_ratelimit()) {
				printk(KERN_ERR "btrfs: fail to "
				       "dirty  inode %lu error %d\n",
				       inode->i_ino, ret);
			}
4440 4441
		}
	}
	btrfs_end_transaction(trans, root);
}

/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = inode->i_ino;
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != inode->i_ino ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
4506
int btrfs_set_inode_index(struct inode *dir, u64 *index)
4507 4508 4509 4510 4511
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_set_inode_index_count(dir);
		if (ret)
4513 4514 4515
			return ret;
	}

4516
	*index = BTRFS_I(dir)->index_cnt;
4517 4518 4519 4520 4521
	BTRFS_I(dir)->index_cnt++;

	return ret;
}

static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
4524
				     struct inode *dir,
4525
				     const char *name, int name_len,
4526 4527
				     u64 ref_objectid, u64 objectid,
				     u64 alloc_hint, int mode, u64 *index)
{
	struct inode *inode;
4530
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
4532
	struct btrfs_path *path;
4533 4534 4535 4536
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;
	int owner;

4540 4541 4542
	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = new_inode(root->fs_info->sb);
4544 4545
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
4547
	}

4549
	if (dir) {
4550 4551
		trace_btrfs_inode_request(dir);

4552
		ret = btrfs_set_inode_index(dir, index);
4553
		if (ret) {
4554
			btrfs_free_path(path);
4555
			iput(inode);
4556
			return ERR_PTR(ret);
4557
		}
4558 4559 4560 4561 4562 4563 4564
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
4566
	BTRFS_I(inode)->generation = trans->transid;
4567
	inode->i_generation = BTRFS_I(inode)->generation;
	btrfs_set_inode_space_info(root, inode);
4569

	if (mode & S_IFDIR)
		owner = 0;
	else
		owner = 1;
4574 4575
	BTRFS_I(inode)->block_group =
			btrfs_find_block_group(root, 0, alloc_hint, owner);

	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

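	/*
	 * Insert the inode item and its first name backref with a single
	 * btrfs_insert_empty_items() call so both keys are created together.
	 */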
	path->leave_spinning = 1;
4589 4590
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
4591 4592
		goto fail;

4593
	inode_init_owner(inode, dir, mode);
	inode->i_ino = objectid;
4595
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4597 4598
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_inode_item);
4599
	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4600 4601 4602 4603

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4604
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4605 4606 4607
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

4608 4609 4610
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

4616 4617
	btrfs_inherit_iflags(inode, dir);

4618 4619 4620
	if ((mode & S_IFREG)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4621 4622
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
4623 4624 4625
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
4627
	inode_tree_add(inode);
4628 4629 4630

	trace_btrfs_inode_new(inode);

	return inode;
4632
fail:
4633 4634
	if (dir)
		BTRFS_I(dir)->index_cnt--;
4635
	btrfs_free_path(path);
4636
	iput(inode);
4637
	return ERR_PTR(ret);
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

/*
 * utility function to add 'inode' into 'parent_inode' with
 * a give name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
4651 4652 4653
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
4655
	int ret = 0;
	struct btrfs_key key;
4657
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4658

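	/*
	 * Linking a subvolume root (BTRFS_FIRST_FREE_OBJECTID) points the dir
	 * item at the subvolume's root key and records a ROOT_REF in the tree
	 * root; ordinary inodes get a normal INODE_REF backref instead.
	 */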
	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = inode->i_ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_inode->i_ino,
					 index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root,
					     name, name_len, inode->i_ino,
					     parent_inode->i_ino, index);
	}

	if (ret == 0) {
4679 4680 4681 4682 4683
		ret = btrfs_insert_dir_item(trans, root, name, name_len,
					    parent_inode->i_ino, &key,
					    btrfs_inode_type(inode), index);
		BUG_ON(ret);

4684
		btrfs_i_size_write(parent_inode, parent_inode->i_size +
4685
				   name_len * 2);
4686
		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4687
		ret = btrfs_update_inode(trans, root, parent_inode);
	}
	return ret;
}

static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4693 4694
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
{
4696 4697 4698
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	if (err > 0)
		err = -EEXIST;
	return err;
}

static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
			int mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
4713
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
4717
	unsigned long nr = 0;
4718
	u64 index = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

4723 4724 4725 4726
	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
4732 4733 4734
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
4735

	btrfs_set_trans_block_group(trans, dir);

4738
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4739
				dentry->d_name.len, dir->i_ino, objectid,
4740
				BTRFS_I(dir)->block_group, mode, &index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

4745
	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
4752
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
4758
		btrfs_update_inode(trans, root, inode);
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
out_unlock:
4763
	nr = trans->blocks_used;
4764
	btrfs_end_transaction_throttle(trans, root);
4765
	btrfs_btree_balance_dirty(root, nr);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}

static int btrfs_create(struct inode *dir, struct dentry *dentry,
			int mode, struct nameidata *nd)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
4778
	struct inode *inode = NULL;
	int drop_inode = 0;
4780
	int err;
4781
	unsigned long nr = 0;
	u64 objectid;
4783
	u64 index = 0;

4785 4786 4787
	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;
	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
4793 4794 4795
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

4799
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4800 4801
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, mode, &index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

4806
	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
4813
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
4821
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
out_unlock:
4826
	nr = trans->blocks_used;
4827
	btrfs_end_transaction_throttle(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
4832
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = old_dentry->d_inode;
4842
	u64 index;
4843
	unsigned long nr = 0;
	int err;
	int drop_inode = 0;

	if (inode->i_nlink == 0)
		return -ENOENT;

4850 4851
	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
4852
		return -EXDEV;
4853

4854 4855 4856
	if (inode->i_nlink == ~0U)
		return -EMLINK;

	btrfs_inc_nlink(inode);
4858
	inode->i_ctime = CURRENT_TIME;

4860
	err = btrfs_set_inode_index(dir, &index);
4861 4862 4863
	if (err)
		goto fail;

4864
	/*
	 * 2 items for inode and inode ref
4866
	 * 2 items for dir items
	 * 1 item for parent inode
4868
	 */
	trans = btrfs_start_transaction(root, 5);
4870 4871 4872 4873
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto fail;
	}
4874

	btrfs_set_trans_block_group(trans, dir);
	ihold(inode);
4877

4878
	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
4879

4880
	if (err) {
4881
		drop_inode = 1;
4882
	} else {
4883
		struct dentry *parent = dget_parent(dentry);
4884 4885 4886
		btrfs_update_inode_block_group(trans, dir);
		err = btrfs_update_inode(trans, root, inode);
		BUG_ON(err);
4887 4888
		btrfs_log_new_name(trans, inode, NULL, parent);
		dput(parent);
4889
	}

4891
	nr = trans->blocks_used;
4892
	btrfs_end_transaction_throttle(trans, root);
4893
fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
4898
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
4904
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
4909
	u64 objectid = 0;
4910
	u64 index = 0;
4911
	unsigned long nr = 1;

4913 4914 4915 4916
	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
4922 4923 4924
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	btrfs_set_trans_block_group(trans, dir);

4927
	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4928
				dentry->d_name.len, dir->i_ino, objectid,
4929 4930
				BTRFS_I(dir)->block_group, S_IFDIR | mode,
				&index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}
4935

	drop_on_err = 1;

4938
	err = btrfs_init_inode_security(trans, inode, dir);
	if (err)
		goto out_fail;

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	btrfs_set_trans_block_group(trans, inode);

4946
	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;
4950

4951 4952
	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;
4955

	d_instantiate(dentry, inode);
	drop_on_err = 0;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);

out_fail:
4962
	nr = trans->blocks_used;
4963
	btrfs_end_transaction_throttle(trans, root);
	if (drop_on_err)
		iput(inode);
4966
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
4974 4975
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
4976 4977
				struct extent_map *em,
				u64 map_start, u64 map_len)
4978 4979 4980
{
	u64 start_diff;

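	/*
	 * Trim the new mapping so it only covers [map_start, map_start + map_len),
	 * the part the caller failed to find, shifting block_start to match for
	 * regular (non-inline, non-compressed) extents.
	 */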
	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4987
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
4990
	return add_extent_mapping(em_tree, em);
4991 4992
}

static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
5004
	int compress_type;

	WARN_ON(pg_offset != 0);
5007
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

5016
	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5017 5018
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		unsigned long copy_size = min_t(u64,
				  PAGE_CACHE_SIZE - pg_offset,
				  max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	kfree(tmp);
	return 0;
}

/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */

5040
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5041
				    size_t pg_offset, u64 start, u64 len,
5042 5043 5044 5045
				    int create)
{
	int ret;
	int err = 0;
5046
	u64 bytenr;
5047 5048 5049 5050
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = inode->i_ino;
	u32 found_type;
5051
	struct btrfs_path *path = NULL;
5052 5053
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
5054 5055
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
5056 5057
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5058
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5059
	struct btrfs_trans_handle *trans = NULL;
5060
	int compress_type;
5061 5062

again:
5063
	read_lock(&em_tree->lock);
5064
	em = lookup_extent_mapping(em_tree, start, len);
5065 5066
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
5067
	read_unlock(&em_tree->lock);
5068

5069
	if (em) {
5070 5071 5072
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
5073 5074 5075
			free_extent_map(em);
		else
			goto out;
5076
	}
	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		BUG_ON(!path);
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compress_type = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
									 item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		em->orig_start = EXTENT_MAP_INLINE;
		if (compress_type) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret);
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			WARN_ON(1);
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;
				btrfs_release_path(root, path);
				trans = btrfs_join_transaction(root, 1);
				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	} else {
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
		WARN_ON(1);
	}
not_found:
	em->start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(root, path);
	if (em->start > start || extent_map_end(em) <= start) {
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:

	trace_btrfs_get_extent(root, em);

	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

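/*
 * fiemap-only wrapper around btrfs_get_extent: when the regular lookup comes
 * back as a hole, also scan the io_tree for delalloc bytes hiding behind that
 * hole and report them as an EXTENT_MAP_DELALLOC mapping clipped to the
 * caller's range.
 */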
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to a hole, there might
		 * actually be delalloc bytes behind it
		 */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start, range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map(GFP_NOFS);
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until  the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

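/*
 * Allocate an on-disk extent for a direct IO write: reserve space inside a
 * joined transaction, (re)build the extent_map for [start, len), insert it in
 * the extent map tree and queue the matching ordered extent.
 */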
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  struct extent_map *em,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;
	bool insert = false;

	/*
	 * Ok if the extent map we looked up is a hole and is for the exact
	 * range we want, there is no reason to allocate a new one, however if
	 * it is not right then we need to free this one and drop the cache for
	 * our range.
	 */
	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
	    em->len != len) {
		free_extent_map(em);
		em = NULL;
		insert = true;
		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
	}

	trans = btrfs_join_transaction(root, 0);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, (u64)-1, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	if (!em) {
		em = alloc_extent_map(GFP_NOFS);
		if (!em) {
			em = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;

	/*
	 * We need to do this because if we're using the original em we searched
	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
	 */
	em->flags = 0;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	while (insert) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}

/*
 * returns 1 when the nocow is safe, < 0 on error, 0 if the
 * block must be cow'd
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != inode->i_ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

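/*
 * get_block callback for the O_DIRECT path.  Maps [start, start + len) to a
 * buffer_head: compressed and inline extents fall back to buffered IO with
 * -ENOTBLK, NOCOW/PREALLOC extents may be reused in place, everything else
 * gets a freshly allocated extent via btrfs_new_extent_direct().
 */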
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1, GFP_NOFS);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = em->len - (start - em->start);
		goto map;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		int ret;
		u64 block_start;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		/*
		 * we're not going to log anything, but we do need
		 * to make sure the current transaction stays open
		 * while we look for nocow cross refs
		 */
		trans = btrfs_join_transaction(root, 0);
		if (IS_ERR(trans))
			goto must_cow;

		if (can_nocow_odirect(trans, inode, start, len) == 1) {
			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			btrfs_end_transaction(trans, root);
			if (ret) {
				free_extent_map(em);
				return ret;
			}
			goto unlock;
		}
		btrfs_end_transaction(trans, root);
	}
must_cow:
	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	em = btrfs_new_extent_direct(inode, em, start, len);
	if (IS_ERR(em))
		return PTR_ERR(em);
	len = min(len, em->len - (start - em->start));
unlock:
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
			  0, NULL, GFP_NOFS);
map:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		set_buffer_new(bh_result);

	free_extent_map(em);

	return 0;
}

struct btrfs_dio_private {
	struct inode *inode;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;
	u32 *csums;
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	struct bio *orig_bio;
};

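/*
 * Completion handler for direct reads: verify the checksums saved at submit
 * time (unless the inode is NODATASUM), unlock the file range and hand the
 * bio back to the generic direct-IO code.
 */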
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start;
	u32 *private = dip->csums;

	start = dip->logical_offset;
	do {
		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			struct page *page = bvec->bv_page;
			char *kaddr;
			u32 csum = ~(u32)0;
			unsigned long flags;

			local_irq_save(flags);
			kaddr = kmap_atomic(page, KM_IRQ0);
			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
					       csum, bvec->bv_len);
			btrfs_csum_final(csum, (char *)&csum);
			kunmap_atomic(kaddr, KM_IRQ0);
			local_irq_restore(flags);

			flush_dcache_page(bvec->bv_page);
			if (csum != *private) {
				printk(KERN_ERR "btrfs csum failed ino %lu off"
				      " %llu csum %u private %u\n",
				      inode->i_ino, (unsigned long long)start,
				      csum, *private);
				err = -EIO;
			}
		}

		start += bvec->bv_len;
		private++;
		bvec++;
	} while (bvec <= bvec_end);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

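/*
 * Completion handler for direct writes: for every ordered extent covered by
 * this bio, insert or convert the file extent item, update i_size and release
 * the delalloc metadata reservation before completing the bio.
 */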
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
	if (!ret)
		goto out_test;

	BUG_ON(!ordered);

	trans = btrfs_join_transaction(root, 1);
	if (IS_ERR(trans)) {
		err = -ENOMEM;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			ret = btrfs_update_inode(trans, root, inode);
		err = ret;
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state, GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
	if (!ret)
		btrfs_update_inode(trans, root, inode);
	ret = 0;
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	btrfs_end_transaction(trans, root);
	ordered_offset = ordered->file_offset + ordered->len;
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);

out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		goto again;
	}
out_done:
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags, u64 offset)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
	BUG_ON(ret);
	return 0;
}

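/*
 * Per-split-bio completion for direct IO: record any error in the dip and,
 * once the last pending bio finishes, complete (or fail) the original bio.
 */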
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
		      "sector %#Lx len %u err no %d\n",
		      dip->inode->i_ino, bio->bi_rw,
		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before the atomic variable goes to zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	bio_put(bio);
}

static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	int nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}

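/*
 * Send one direct-IO bio down to the device: hook up the end_io workqueue,
 * csum the data for writes (inline or via the async helper threads) or look
 * up the existing csums for reads, then map and submit the bio.
 */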
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
					 u32 *csums, int async_submit)
{
	int write = rw & REQ_WRITE;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	bio_get(bio);
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	if (ret)
		goto err;

	if (skip_sum)
		goto map;

	if (write && async_submit) {
		ret = btrfs_wq_submit_bio(root->fs_info,
				   inode, rw, bio, 0, 0,
				   file_offset,
				   __btrfs_submit_bio_start_direct_io,
				   __btrfs_submit_bio_done);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else if (!skip_sum) {
		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
					  file_offset, csums);
		if (ret)
			goto err;
	}

map:
	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
	bio_put(bio);
	return ret;
}

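/*
 * Split the original direct-IO bio so that no piece crosses a chunk/stripe
 * boundary reported by btrfs_map_block(), submitting each piece with
 * __btrfs_submit_dio_bio() and tracking them via dip->pending_bios.
 */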
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;
	u64 map_length;
	int nr_pages = 0;
	u32 *csums = dip->csums;
	int ret = 0;
	int async_submit = 0;
	int write = rw & REQ_WRITE;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		/* bio has not been allocated yet, nothing to put here */
		return -EIO;
	}

	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	async_submit = 1;
	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums, async_submit);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			/* Write's use the ordered csums */
			if (!write && !skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums, async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before the atomic variable goes to zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}

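/*
 * Entry point handed to __blockdev_direct_IO: package the bio into a
 * btrfs_dio_private, pick the read or write completion handler and submit it;
 * on failure, clean up the reserved space and ordered extent for writes.
 */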
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}
	dip->csums = NULL;

	/* Write's use the ordered csum stuff, so we don't need dip->csums */
	if (!write && !skip_sum) {
		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
		if (!dip->csums) {
			kfree(dip);
			ret = -ENOMEM;
			goto free_ordered;
		}
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}

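/*
 * Validate an O_DIRECT request: the offset, length and every iovec must be
 * sector aligned, and (for reads) no two iovecs may share the same base
 * address; returning non-zero makes the caller fall back to buffered IO.
 */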
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	int seg;
	int i;
	size_t size;
	unsigned long addr;
	unsigned blocksize_mask = root->sectorsize - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
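/*
 * ->direct_IO for btrfs: lock the file range, wait out any ordered extents,
 * tag the range delalloc for writes and hand off to __blockdev_direct_IO();
 * whatever part of the range no IO was issued for is unlocked again here.
 */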
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	ssize_t ret;
	int writing = rw & WRITE;
	int write_bits = 0;
	size_t count = iov_length(iov, nr_segs);

	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
			    offset, nr_segs)) {
		return 0;
	}

	lockstart = offset;
	lockend = offset + count - 1;

	if (writing) {
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	}

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state, GFP_NOFS);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}

	/*
	 * we don't use btrfs_set_extent_delalloc because we don't want
	 * the dirty or uptodate bits
	 */
	if (writing) {
		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     EXTENT_DELALLOC, 0, NULL, &cached_state,
				     GFP_NOFS);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
					 lockend, EXTENT_LOCKED | write_bits,
					 1, 0, &cached_state, GFP_NOFS);
			goto out;
		}
	}

	free_extent_state(cached_state);
	cached_state = NULL;

	ret = __blockdev_direct_IO(rw, iocb, inode,
		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
		   btrfs_submit_direct, 0);

	if (ret < 0 && ret != -EIOCBQUEUED) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
		/*
		 * We're falling back to buffered, unlock the section we didn't
		 * do IO on.
		 */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	}
out:
	free_extent_state(cached_state);
	return ret;
}

static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}

int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent);
}

static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;


	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}

int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}

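/*
 * Called when a page is being dropped from the page cache.  Any ordered
 * extent that was counting on this page is accounted for here (finishing the
 * ordered IO if we were the ones to clear PagePrivate2) before the extent
 * state for the page range is cleared and the page is released.
 */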
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					   page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 &cached_state, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		cached_state = NULL;
		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
				 GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	u64 page_start;
	u64 page_end;

	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
	 * clear any delalloc bits for the range we are fixing to save.  There
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out:
	return ret;
}

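/*
 * Shrink an inode to its current i_size: zero the partial tail page, wait for
 * ordered IO, then drop extent items past the new size in small transactions,
 * keeping the inode on the orphan list so a crash mid-truncate can be cleaned
 * up on the next mount.
 */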
static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	int err = 0;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return ret;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, inode);

	ret = btrfs_orphan_add(trans, inode);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	/* Now start a transaction for the truncate */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	btrfs_set_trans_block_group(trans, inode);
	trans->block_rsv = root->orphan_block_rsv;

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		if (!trans) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			btrfs_set_trans_block_group(trans, inode);
			trans->block_rsv = root->orphan_block_rsv;
		}

		ret = btrfs_block_rsv_check(trans, root,
					    root->orphan_block_rsv, 0, 5);
		if (ret == -EAGAIN) {
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			trans = NULL;
			continue;
		} else if (ret) {
			err = ret;
			break;
		}

		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN) {
			err = ret;
			break;
		}

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		ret = btrfs_orphan_del(trans, inode);
		if (ret)
			err = ret;
	} else if (ret && inode->i_nlink > 0) {
		/*
		 * Failed to do the truncate, remove us from the in memory
		 * orphan list.
		 */
		ret = btrfs_orphan_del(NULL, inode);
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && !err)
		err = ret;

	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	if (ret && !err)
		err = ret;
	btrfs_btree_balance_dirty(root, nr);

	return err;
}

/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     u64 new_dirid, u64 alloc_hint)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	inode->i_nlink = 1;
	btrfs_i_size_write(inode, 0);

	err = btrfs_update_inode(trans, new_root, inode);
	BUG_ON(err);

	iput(inode);
	return 0;
}

/* helper function for file defrag and space balancing.  This
 * forces readahead on a given range of bytes in an inode
 */
unsigned long btrfs_force_ra(struct address_space *mapping,
			      struct file_ra_state *ra, struct file *file,
			      pgoff_t offset, pgoff_t last_index)
{
	pgoff_t req_size = last_index - offset + 1;

	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
	return offset + req_size;
}

struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->space_info = NULL;
	ei->generation = 0;
	ei->sequence = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->reserved_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->index_cnt = (u64)-1;
	ei->last_unlink_trans = 0;

	atomic_set(&ei->outstanding_extents, 0);
	atomic_set(&ei->reserved_extents, 0);

	ei->ordered_data_close = 0;
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
	extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}

static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
	WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841
	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856
	if (root == root->fs_info->tree_root) {
		struct btrfs_block_group_cache *block_group;

		block_group = btrfs_lookup_block_group(root->fs_info,
						BTRFS_I(inode)->block_group);
		if (block_group && block_group->inode == inode) {
			spin_lock(&block_group->lock);
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			btrfs_put_block_group(block_group);
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}

6857
	spin_lock(&root->orphan_lock);
6858
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6859 6860 6861
		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
		       inode->i_ino);
		list_del_init(&BTRFS_I(inode)->i_orphan);
6862
	}
6863
	spin_unlock(&root->orphan_lock);
6864

C
Chris Mason 已提交
6865
	while (1) {
6866 6867 6868 6869
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
C
Chris Mason 已提交
6870 6871 6872 6873
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
6874 6875 6876 6877 6878
			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
6879
	inode_tree_del(inode);
6880
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
6881
free:
N
Nick Piggin 已提交
6882
	call_rcu(&inode->i_rcu, btrfs_i_callback);
C
Chris Mason 已提交
6883 6884
}

int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root)
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
}

int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}

static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}

/*
 * If a file is moved, it will inherit the cow and compression flags of the new
 * directory.
 */
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	if (b_dir->flags & BTRFS_INODE_NODATACOW)
		b_inode->flags |= BTRFS_INODE_NODATACOW;
	else
		b_inode->flags &= ~BTRFS_INODE_NODATACOW;

	if (b_dir->flags & BTRFS_INODE_COMPRESS)
		b_inode->flags |= BTRFS_INODE_COMPRESS;
	else
		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}

static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;

	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * we're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	btrfs_set_trans_block_group(trans, new_dir);

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_inode->i_ino,
					     new_dir->i_ino, index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size &&
	    old_inode && S_ISREG(old_inode->i_mode)) {
		btrfs_add_ordered_operation(trans, root, old_inode);
	}

	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					old_dentry->d_inode,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	BUG_ON(ret);

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(new_inode->i_ino ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		BUG_ON(ret);
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
	}

	fixup_inode_flags(new_dir, old_inode);

	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	BUG_ON(ret);

	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = dget_parent(new_dentry);
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		dput(parent);
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction_throttle(trans, root);
out_notrans:
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}

/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	      atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}

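/*
 * Illustrative usage sketch, not part of the original file: a caller that
 * needs every delalloc inode of a root submitted for writeback would do
 * roughly this.  Passing delay_iput = 1 defers the final iput() to a
 * helper, which is the safer choice for callers that are holding a
 * transaction open.  The helper name is made up.
 */
#if 0
static int example_flush_delalloc(struct btrfs_root *root)
{
	/* starts writeback on every delalloc inode of this root and waits
	 * until the async submission threads have created the ordered
	 * extents for it */
	return btrfs_start_delalloc_inodes(root, 1);
}
#endif
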
int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
				   int sync)
{
	struct btrfs_inode *binode;
	struct inode *inode = NULL;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(&root->fs_info->delalloc_inodes)) {
		binode = list_entry(root->fs_info->delalloc_inodes.next,
				    struct btrfs_inode, delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (inode) {
			list_move_tail(&binode->delalloc_inodes,
				       &root->fs_info->delalloc_inodes);
			break;
		}

		list_del_init(&binode->delalloc_inodes);
		cond_resched_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	if (inode) {
		if (sync) {
			filemap_write_and_wait(inode->i_mapping);
			/*
			 * We have to do this because compression doesn't
			 * actually set PG_writeback until it submits the pages
			 * for IO, which happens in an async thread, so we could
			 * race and not actually wait for any writeback pages
			 * because they've not been submitted yet.  Technically
			 * this could still be the case for the ordered stuff
			 * since the async thread may not have started to do its
			 * work yet.  If this becomes the case then we need to
			 * figure out a way to make sure that in writepage we
			 * wait for any async pages to be submitted before
			 * returning so that fdatawait does what it's supposed to
			 * do.
			 */
			btrfs_wait_ordered_range(inode, 0, (u64)-1);
		} else {
			filemap_flush(inode->i_mapping);
		}
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);
		return 1;
	}
	return 0;
}

static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
	if (err)
		return err;
	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_set_trans_block_group(trans, dir);

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, dir->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

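/*
 * Illustrative sketch, not part of the original file: because the target
 * string (including its terminating NUL) is stored as a single inline
 * extent, the longest symlink target btrfs_symlink() accepts is
 * BTRFS_MAX_INLINE_DATA_SIZE(root) - 1 characters; anything longer is
 * rejected with -ENAMETOOLONG above.  The helper name is made up.
 */
#if 0
static inline size_t example_max_symlink_target_len(struct btrfs_root *root)
{
	return BTRFS_MAX_INLINE_DATA_SIZE(root) - 1;
}
#endif
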
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, (u64)-1, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}

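/*
 * Illustrative usage sketch, not part of the original file: this is
 * roughly how a fallocate()-style caller preallocates [start, start + len)
 * while leaving i_size alone.  The helper name is made up; the one-block
 * min_size and the zero allocation hint are example choices.
 */
#if 0
static int example_prealloc(struct inode *inode, u64 start, u64 len)
{
	u64 alloc_hint = 0;

	return btrfs_prealloc_file_range(inode, FALLOC_FL_KEEP_SIZE,
					 start, len, 1 << inode->i_blkbits,
					 start + len, &alloc_hint);
}
#endif
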
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_readonly(root) && (mask & MAY_WRITE))
		return -EROFS;
	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, flags, btrfs_check_acl);
}
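
/*
 * Illustrative sketch, not part of the original file: generic VFS callers
 * reach the checks above through inode_permission(), so asking for write
 * access on an inode in a read-only subvolume yields -EROFS and on a
 * READONLY-flagged inode yields -EACCES.  The helper name is made up.
 */
#if 0
static int example_may_write(struct inode *inode)
{
	return inode_permission(inode, MAY_WRITE);
}
#endif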

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release        = btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr      = btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};