/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	int barrier_done = 0;
	struct timespec now = current_kernel_time();

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

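	/* Fill in the commit block header and timestamp the commit */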
	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = submit_bh(WRITE_SYNC_PLUG, bh);
	if (barrier_done)
		clear_buffer_ordered(bh);

	/* is it possible for another commit to fail at roughly
	 * the same time as this one?  If so, we don't want to
	 * trust the barrier flag in the super, but instead want
	 * to remember if we sent a barrier request
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		printk(KERN_WARNING
		       "JBD: barrier-based sync failed on %s - "
		       "disabling barriers\n", journal->j_devname);
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		lock_buffer(bh);
		set_buffer_uptodate(bh);
		clear_buffer_dirty(bh);
		ret = submit_bh(WRITE_SYNC_PLUG, bh);
	}
	*cbh = bh;
	return ret;
}

/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

retry:
	clear_buffer_dirty(bh);
	wait_on_buffer(bh);
	if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
		printk(KERN_WARNING
		       "JBD2: wait_on_commit_record: sync failed on %s - "
		       "disabling barriers\n", journal->j_devname);
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		lock_buffer(bh);
		clear_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		bh->b_end_io = journal_end_buffer_io_sync;

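		/* Barriers are now disabled; resubmit as a plain write */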
		ret = submit_bh(WRITE_SYNC_PLUG, bh);
		if (ret) {
			unlock_buffer(bh);
			return ret;
		}
		goto retry;
	}

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}

/*
 * Write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * currently operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage
		 * instead of writepages, because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		commit_transaction->t_flushed_data_blocks = 1;
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that the user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

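/*
 * Fold a buffer's contents into the running transaction checksum that is
 * written into the commit block when the CHECKSUM compat feature is set.
 */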
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}

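/*
 * Write the on-disk tag for a journaled block.  The high 32 bits of the
 * block number are only written when the journal uses 64-bit block tags.
 */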
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i, to_free = 0;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	int write_op = WRITE;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	/*
	 * Use plugged writes here, since we want to submit several before
	 * we unplug the device. We don't do explicit unplugging in here,
	 * instead we rely on sync_buffer() doing the unplug for us.
	 */
	if (commit_transaction->t_synchronous_commit)
		write_op = WRITE_SYNC_PLUG;
	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_locked = jiffies;
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

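	/*
	 * Wait for all outstanding handles on this transaction to finish
	 * before we lock it down for commit.
	 */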
	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  write_op);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks = commit_transaction->t_outstanding_credits;
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);

	err = 0;
	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

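		/* A flags value of 1 from jbd2_journal_write_metadata_buffer()
		 * means the block data had to be escaped because it matched
		 * the JBD2 magic number. */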
		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(write_op, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_flushed_data_blocks &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
		if (journal->j_flags & JBD2_BARRIER)
			blkdev_issue_flush(journal->j_dev, NULL);
	}

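	/* Wait for the inode data buffers submitted earlier to reach disk */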
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
		       "JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 5\n");

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (!err && !is_journal_aborted(journal))
		err = journal_wait_on_commit_record(journal, cbh);

	if (err)
		jbd2_journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future after the "add to orphan"
		 * operation has been committed.  That's not only a performance
		 * gain, it also stops aliasing problems if the buffer is
		 * left behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count = commit_transaction->t_handle_count;
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
	spin_unlock(&journal->j_state_lock);

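	/*
	 * A transaction with empty checkpoint lists can be freed right away;
	 * otherwise link it into the journal's circular list of transactions
	 * awaiting checkpoint.
	 */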
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

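	/* Let the client (e.g. ext4) run its per-commit callback */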
	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		kfree(commit_transaction);

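	/* Wake anyone sleeping in jbd2_log_wait_commit() */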
	wake_up(&journal->j_wait_done_commit);
}