/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_clear_bit();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}
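
/*
 * Note on the handshake above: bh is the temporary buffer created by
 * jbd2_journal_write_metadata_buffer(), and its b_private points back at
 * the metadata buffer it shadows.  Writers waiting to modify that buffer
 * (typically in do_get_write_access()) sleep on its BH_Shadow bit, so
 * clearing the bit and waking them here is what lets the buffer be
 * modified again once its copy has reached the journal.
 */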

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
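
/*
 * Reference-counting note for the above: the caller's reference on bh is
 * dropped with __brelse() before try_to_free_buffers() runs, so the
 * buffer count can fall to zero, while the extra page reference taken
 * with page_cache_get() keeps the page itself pinned until it has been
 * unlocked and released.
 */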

static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}
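
/*
 * The checksum above is computed over the whole commit block with the
 * checksum fields zeroed first, so journal recovery can redo the same
 * calculation on the block it reads and compare the result against
 * h_chksum[0].
 */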

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(journal);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}
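
/*
 * Sketch of the commit block written above, assuming the descriptor
 * buffer starts out zero-filled: a struct commit_header at offset 0
 * carrying h_magic (JBD2_MAGIC_NUMBER), h_blocktype (JBD2_COMMIT_BLOCK),
 * the transaction's tid in h_sequence, the optional checksum fields and
 * the commit timestamp, with the rest of the journal block left as
 * padding.
 */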

/*
 * This function along with journal_submit_commit_record
 * allows to write the commit record asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */

	return ret;
}

/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
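
/*
 * WB_SYNC_ALL together with an nr_to_write budget of twice the number of
 * cached pages gives generic_writepages() enough room to push out every
 * dirty page in the range, while staying on the ->writepage path that
 * does not allocate delayed-allocation blocks.
 */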

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage instead of
		 * writepages because writepages can do block allocation with
		 * delalloc. We need to write only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 *
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}

static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
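
/*
 * The low 32 bits of the block number always land in t_blocknr; only
 * journals using the wider 64-bit tag format also record the upper bits
 * in t_blocknr_high, which is why that store is guarded by the tag size
 * check above.
 */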

static void jbd2_descr_block_csum_set(journal_t *j,
				      struct buffer_head *bh)
{
	struct jbd2_journal_block_tail *tail;
	__u32 csum;

	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return;

	tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
			sizeof(struct jbd2_journal_block_tail));
	tail->t_checksum = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	tail->t_checksum = cpu_to_be32(csum);
}

static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	/* We only have space to store the lower 16 bits of the crc32c. */
	tag->t_checksum = cpu_to_be16(csum32);
}
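
/*
 * The per-tag checksum mixes the journal's checksum seed, the commit
 * sequence number and the block contents.  Only the low 16 bits are kept
 * in t_checksum; that is still enough for recovery to reject most stale
 * or corrupted blocks it finds in the log.
 */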
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						WRITE_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);
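
	/*
	 * All handles attached to this transaction have now detached
	 * (t_updates reached zero) and, with the state already T_LOCKED,
	 * no new handle can join it, so the commit can proceed without
	 * racing against running updates.
	 */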

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear revoked flag to reflect there is no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	/*
	 * Reserved credits cannot be claimed anymore, free them
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  &log_bufs, WRITE_SYNC);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			header = (journal_header_t *)descriptor->b_data;
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}
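
		/*
		 * Rough layout of the descriptor block at this point: a
		 * journal_header_t up front (magic, JBD2_DESCRIPTOR_BLOCK,
		 * this transaction's tid), then the journal_block_tag_t
		 * array filled in below, the 16-byte journal UUID spliced
		 * in after the first tag, and, when checksums are enabled,
		 * a tail reserved at the very end of the block.
		 */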

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);

			jbd2_descr_block_csum_set(journal, descriptor);
start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < journal->j_maxlen / 4)
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/* 
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);
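
	/*
	 * The pairing in the loop above relies on io_bufs and the shadow
	 * list being filed in the same order during the write-out phase,
	 * so popping both from the tail keeps each temporary buffer
	 * matched with the metadata buffer it was copied from.
	 */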

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * A buffer which has been freed while still being journaled by
		 * a previous transaction.
		 */
		if (buffer_freed(bh)) {
			/*
			 * If the running transaction is the one containing
			 * "add to orphan" operation (b_next_transaction !=
			 * NULL), we have to wait for that transaction to
			 * commit before we can really get rid of the buffer.
			 * So just clear b_modified to not confuse transaction
			 * credit accounting and refile the buffer to
			 * BJ_Forget of the running transaction. If the just
			 * committed transaction contains "add to orphan"
			 * operation, we can completely invalidate the buffer
			 * now. We are rather thorough in that, since the
			 * buffer may still be accessible when blocksize <
			 * pagesize and it is attached to the last partial
			 * page.
			 */
			jh->b_modified = 0;
			if (!jh->b_next_transaction) {
				clear_buffer_freed(bh);
				clear_buffer_jbddirty(bh);
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Add the transaction to the checkpoint list
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}