/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
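	/*
	 * If this was the temporary copy of a journaled buffer, clear the
	 * shadow bit on the original buffer head and wake up any waiters
	 * (e.g. do_get_write_access()) blocked on BH_Shadow.
	 */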
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

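/*
 * Checksum the commit block: for csum v2/v3 the whole block is checksummed
 * with the h_chksum fields zeroed and the result is stored in h_chksum[0].
 */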
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(journal);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}

/*
 * This function along with journal_submit_commit_record
 * allows writing the commit record asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */

	return ret;
}

/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}

/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage
		 * instead of writepages because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_atomic();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 *
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_atomic();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

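/*
 * Fold the contents of a buffer into the running crc32 that is written
 * into the commit block when JBD2_FEATURE_COMPAT_CHECKSUM is enabled.
 */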
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}

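/*
 * Store a metadata buffer's on-disk block number in its descriptor tag;
 * the high 32 bits are only written when the 64BIT incompat feature is set.
 */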
static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

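/*
 * Checksum a descriptor block: for csum v2/v3 the checksum of the whole
 * block (with the tail checksum field zeroed) is stored in the block tail.
 */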
static void jbd2_descr_block_csum_set(journal_t *j,
				      struct buffer_head *bh)
{
	struct jbd2_journal_block_tail *tail;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
			sizeof(struct jbd2_journal_block_tail));
	tail->t_checksum = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	tail->t_checksum = cpu_to_be32(csum);
}

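/*
 * Compute the per-block tag checksum over the commit sequence number and
 * the buffer contents (csum v2/v3); v3 tags carry a 32-bit checksum while
 * older tags only have room for 16 bits.
 */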
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						WRITE_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear revoked flag to reflect there are no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	/*
	 * Reserved credits cannot be claimed anymore, free them
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  &log_bufs, WRITE_SYNC);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			header = (journal_header_t *)descriptor->b_data;
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);

			jbd2_descr_block_csum_set(journal, descriptor);
start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < journal->j_maxlen / 4)
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		* A buffer which has been freed while still being journaled by
		* a previous transaction.
		*/
		if (buffer_freed(bh)) {
			/*
			 * If the running transaction is the one containing
			 * "add to orphan" operation (b_next_transaction !=
			 * NULL), we have to wait for that transaction to
			 * commit before we can really get rid of the buffer.
			 * So just clear b_modified to not confuse transaction
			 * credit accounting and refile the buffer to
			 * BJ_Forget of the running transaction. If the just
			 * committed transaction contains "add to orphan"
			 * operation, we can completely invalidate the buffer
			 * now. We are rather thorough in that, since the
			 * buffer may still be accessible when blocksize <
			 * pagesize and it is attached to the last partial
			 * page.
			 */
			jh->b_modified = 0;
			if (!jh->b_next_transaction) {
				clear_buffer_freed(bh);
				clear_buffer_jbddirty(bh);
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Add the transaction to the checkpoint list
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}