/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
#include <asm/system.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}

/*
 * This function, along with journal_submit_commit_record(),
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}

/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here, even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage
		 * instead of writepages because writepages can do
		 * block allocation with delalloc. We need to write
		 * only already-allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 *
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that the user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

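/*
 * Fold a buffer's contents into the running transaction checksum
 * using crc32_be().
 */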
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}

static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
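	/* The upper bits of the block number are written only when the
	 * journal uses 64-bit block tags. */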
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i, to_free = 0;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_locked = jiffies;
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

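	/*
	 * Wait for any handles still running on this transaction
	 * (t_updates) to complete before proceeding with the commit.
	 */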
	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

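	/*
	 * Write out this transaction's revoke records, plugging the
	 * block device queue so the writes can be batched.
	 */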
	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	descriptor = NULL;
	bufs = 0;
	blk_start_plug(&plug);
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);
	/* 
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake up any transactions which were waiting for this IO to
		 * complete. The barrier must be here so that changes by
		 * jbd2_journal_file_buffer() take effect before wake_up_bit()
		 * does the waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
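	/*
	 * In async-commit mode the commit block was submitted without a
	 * flush, so flush the journal device before the transaction is
	 * considered committed.
	 */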
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future after the "add to orphan"
		 * operation has been committed.  That's not only a performance
		 * gain, it also stops aliasing problems if the buffer is
		 * left behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				 /* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

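	/* Mark the transaction finished and advance the commit sequence. */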
	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
	write_unlock(&journal->j_state_lock);

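	/*
	 * If the transaction left nothing on its checkpoint lists it can
	 * be freed right away; otherwise link it into the journal's list
	 * of checkpointing transactions.
	 */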
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

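	/* Notify the client filesystem that this commit has finished. */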
	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		kfree(commit_transaction);

	wake_up(&journal->j_wait_done_commit);
}