/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"

#define BTRFS_ROOT_TRANS_TAG 0

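/*
 * Transactions are reference counted: each writer, each commit waiter and
 * the commit path itself hold a use_count.  put_transaction() drops one
 * reference and frees the structure once the last holder is gone.
 */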
void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = fs_info->running_transaction;
		goto loop;
	} else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&root->fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
		WARN_ON(1);
	}
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
		printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
		WARN_ON(1);
	}
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			     fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
			   (unsigned long)root->root_key.objectid,
			   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

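/*
 * The different ways a transaction handle can be started.  TRANS_START and
 * TRANS_USERSPACE may wait for the running transaction to unblock before
 * joining, TRANS_JOIN hops into the current transaction, and
 * TRANS_JOIN_NOLOCK additionally ignores trans_no_join (the "nofail" case
 * in join_transaction()).
 */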
enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

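/*
 * start_transaction() does the common work for all the variants below: if
 * the caller asked for num_items of metadata, reserve that space first (so
 * any flushing happens before we join), then join or create a transaction,
 * retrying while join_transaction() returns -EBUSY, and finally record this
 * root in the transaction.  Nested starts on the same task reuse the handle
 * stashed in current->journal_info.
 */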
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->delayed_ref_elem.seq = 0;
	INIT_LIST_HEAD(&h->qgroup_ref_list);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

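/*
 * Illustrative usage (sketch): a typical metadata update reserves room for
 * the items it will touch, does its btree work and then ends the handle:
 *
 *	trans = btrfs_start_transaction(root, 2);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify tree items ...
 *	btrfs_end_transaction(trans, root);
 */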
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

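/*
 * Wait for a commit to finish.  With a non-zero transid we wait for that
 * specific transaction (returning -EINVAL if it is not found and has not
 * already committed); with transid == 0 we wait for the newest transaction
 * that is currently committing, if any.
 */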
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

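/*
 * Decide whether a handle should be ended early: should_end_transaction()
 * checks how full the global block reserve is, and
 * btrfs_should_end_transaction() additionally returns 1 once the running
 * transaction is blocked or flushing delayed refs, after pushing any
 * delayed ref updates this handle has accumulated.
 */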
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_rsv *rsv = trans->block_rsv;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	/*
	 * We need to do this in case we're deleting csums so the global block
	 * rsv gets used instead of the csum block rsv.
	 */
	trans->block_rsv = NULL;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	trans->block_rsv = rsv;

	return should_end_transaction(trans, root);
}

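/*
 * Common tail for the btrfs_end_transaction*() variants below: release the
 * handle's metadata reservation, push a batch of delayed refs, mark the
 * transaction blocked (or commit it when throttling) if it is time to end
 * it, drop our writer count and free the handle.  "lock" is zero only for
 * the _nolock variant.
 */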
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction. Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);
	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
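/*
 * btrfs_write_marked_extents() converts each range it submits from the
 * caller's "mark" bit to EXTENT_NEED_WAIT, so btrfs_wait_marked_extents()
 * below knows exactly which ranges still need filemap_fdatawait_range()
 * and can clear them as it goes.
 */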
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
				   GFP_NOFS);
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_qgroups(trans, root->fs_info);
	BUG_ON(ret);

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * write out all the fs/subvolume roots that were modified in this
 * transaction: they are tagged in the fs_roots_radix tree, and their root
 * items in the tree of tree roots are brought up to date here
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
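/*
 * In outline: reserve space for the new root items, insert the snapshot's
 * directory entry in the parent, flush the parent's delayed items, copy the
 * source root's node into a fresh root, insert the new root item plus its
 * backref, and read the new root back so pending->snap can be handed to the
 * caller.  Most failures after that point abort the running transaction
 * (see the abort_trans labels).
 */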
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
						  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	if (ret == -EEXIST) {
		pending->error = -EEXIST;
		dput(parent);
		goto fail;
	} else if (ret) {
		goto abort_trans_dput;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	if (ret)
		goto abort_trans_dput;

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) { /* Transaction aborted */
		dput(parent);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		goto abort_trans_dput;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret)
		goto abort_trans_dput;

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto abort_trans_dput;

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	dput(parent);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		goto abort_trans;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret)
		goto abort_trans;
	ret = 0;
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;

abort_trans_dput:
	dput(parent);
abort_trans:
	btrfs_abort_transaction(trans, root, ret);
	goto fail;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

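/*
 * copy the freshly committed root pointers (chunk root, tree root and, with
 * space_cache enabled, the cache generation) into the in-memory copy of the
 * super block; the result is written out by write_ctree_super() at the end
 * of the commit.
 */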
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		root->fs_info->running_transaction = NULL;
		root->fs_info->trans_no_join = 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
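/*
 * Roughly, a commit runs in these phases: flush delayed refs and mark the
 * transaction blocked, wait for the other writers to finish (flushing
 * delalloc and delayed items while we wait), create pending snapshots,
 * commit the fs roots and then the cowonly roots under tree_log_mutex and
 * reloc_mutex, copy the root pointers into the super block, unblock new
 * transactions, and finally write out the btree pages and the super blocks.
 */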
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret = -EIO;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (cur_trans->aborted)
		goto cleanup_transaction;

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			btrfs_wait_ordered_extents(root, 0, 1);
		}

		ret = btrfs_run_delayed_items(trans, root);
		if (ret)
			goto cleanup_transaction;

		/*
		 * running the delayed items may have added new refs. account
		 * them now so that they hinder processing of more delayed refs
		 * as little as possible.
		 */
		btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//	WARN_ON(1);
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}