/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

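/*
 * drop a reference on a transaction and free it once the last reference
 * is gone
 */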
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

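/*
 * swap root->commit_root over to the current root node, dropping our
 * reference on the old commit root
 */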
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;
	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			     root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
			   (unsigned long)root->root_key.objectid,
			   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

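/*
 * flavours of transaction start: TRANS_START reserves metadata space,
 * TRANS_JOIN hops into the currently running transaction, TRANS_USERSPACE
 * is used by the transaction start/end ioctls, and TRANS_JOIN_NOLOCK joins
 * even while new joins are temporarily blocked for a commit
 */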
enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

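/*
 * return 1 if the caller should wait for the running transaction to become
 * unblocked before joining it
 */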
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

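/*
 * reserve metadata space for num_items items, then join or start a
 * transaction of the given type; returns an ERR_PTR on failure
 */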
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(NULL, root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}

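/*
 * A minimal usage sketch of the handles returned above (illustrative only,
 * not called from this file; "inode" stands in for whatever the caller is
 * modifying): reserve space for one item, make the change, then end the
 * transaction so the reservation is released.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_update_inode(trans, root, inode);
 *	btrfs_end_transaction(trans, root);
 */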
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
	return 0;
}

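/*
 * wait for the commit of a specific transid, or of the newest committing
 * transaction when transid == 0
 */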
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

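/*
 * unless a userspace (ioctl started) transaction is open, wait for a
 * blocked transaction commit before letting the caller pile on more work
 */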
void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

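/*
 * returns 1 when the check on the global block reserve says the running
 * transaction should be ended soon
 */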
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;
	ret = btrfs_block_rsv_check(trans, root,
				    &root->fs_info->global_block_rsv, 0, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	return should_end_transaction(trans, root);
}

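/*
 * drop one use of the trans handle: run a batch of pending delayed refs,
 * release the handle's metadata reservation and, when the transaction is
 * due to close, either commit it (throttle) or wake the transaction kthread
 */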
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs tree (subvolume) roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
	btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
					  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
	btrfs_orphan_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

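/*
 * copy the tree root and chunk root pointers (and the cache generation)
 * from their root items into the in-memory copy of the super block
 */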
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

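/* return 1 if the running transaction has entered its commit phase */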
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

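/* return 1 if the running transaction is currently in its blocked state */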
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_fs_roots and commit_cowonly_roots below are responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}