/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

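/*
 * Checksum the commit block: when the v2 checksum feature is enabled the
 * commit header carries a checksum of the whole block, computed with the
 * checksum fields zeroed out first.
 */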
static void jbd2_commit_block_csum_set(journal_t *j,
				       struct journal_head *descriptor)
{
	struct commit_header *h;
	__u32 csum;

	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return;

	h = (struct commit_header *)(jh2bh(descriptor)->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
			   j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, descriptor);

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

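	/*
	 * With barriers enabled and synchronous commits, write the commit
	 * block with an explicit cache flush and FUA so it reaches stable
	 * storage only after the journal blocks that precede it.  Async
	 * commits skip this and rely on a later cache flush instead.
	 */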
	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}

/*
 * This function, along with journal_submit_commit_record, allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}

/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * are currently operating on from being released while we write out its pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage
		 * instead of writepages because writepages can do
		 * block allocation with delalloc; we need to write
		 * only already-allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 *
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

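/*
 * Fold the contents of a buffer into the running crc32 that guards the
 * commit block when JBD2_FEATURE_COMPAT_CHECKSUM is enabled.
 */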
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}

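/*
 * Record a block number in a descriptor tag.  The low 32 bits always go
 * into t_blocknr; with 64-bit block numbers the high bits are stored in
 * t_blocknr_high as well.
 */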
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

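/*
 * Checksum a descriptor block: the tail of the block holds a
 * jbd2_journal_block_tail whose t_checksum covers the whole block,
 * computed with the checksum field itself zeroed.
 */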
static void jbd2_descr_block_csum_set(journal_t *j,
				      struct journal_head *descriptor)
{
	struct jbd2_journal_block_tail *tail;
	__u32 csum;

	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return;

	tail = (struct jbd2_journal_block_tail *)
			(jh2bh(descriptor)->b_data + j->j_blocksize -
			sizeof(struct jbd2_journal_block_tail));
	tail->t_checksum = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
			   j->j_blocksize);
	tail->t_checksum = cpu_to_be32(csum);
}

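/*
 * Checksum a journalled block for its descriptor tag: the checksum covers
 * the commit sequence number followed by the block contents.
 */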
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum;

	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return;

	sequence = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
			  sizeof(sequence));
	csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
			  bh->b_size);
	kunmap_atomic(addr);

	tag->t_checksum = cpu_to_be32(csum);
}
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
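 *
 * The commit runs in phases: lock down the running transaction and wait
 * for its outstanding updates to finish, flush the associated data and
 * revoke records, write the metadata buffers together with their
 * descriptor blocks, emit the commit record, and finally move the
 * committed buffers onto the checkpoint lists.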
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i, to_free = 0;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						WRITE_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_locked = jiffies;
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear revoked flag to reflect there is no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	descriptor = NULL;
	bufs = 0;
	blk_start_plug(&plug);
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);

			jbd2_descr_block_csum_set(journal, descriptor);
start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < journal->j_maxlen / 4)
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/* 
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD2: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake up any transactions which were waiting for this IO to
		 * complete. The barrier must be here so that changes by
		 * jbd2_journal_file_buffer() take effect before wake_up_bit()
		 * does the waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
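	/*
	 * An async commit record was submitted without a flush/FUA, so
	 * force the journal device's write cache out now to make the
	 * commit durable.
	 */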
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future after the "add to orphan"
		 * operation has been committed.  That's not only a performance
		 * gain, it also stops aliasing problems if the buffer is
		 * left behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
	write_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		jbd2_journal_free_transaction(commit_transaction);

	wake_up(&journal->j_wait_done_commit);
}