// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
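	/*
	 * If this write was for a temporary copy of a journalled metadata
	 * buffer, the original buffer was marked BH_Shadow while the copy
	 * was in flight; clear that bit and wake anyone waiting to modify
	 * the original buffer again.
	 */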
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	get_page(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	put_page(page);
	return;

nope:
	__brelse(bh);
}

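/*
 * Checksum the commit block when the v2/v3 checksum feature is enabled.
 * The checksum fields are zeroed first so the block checksums to a stable
 * value, then the result is stored back in the commit header.
 */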
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec64 now;

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	ktime_get_coarse_real_ts64(&now);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		ret = submit_bh(REQ_OP_WRITE,
			REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
	else
		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

	*cbh = bh;
	return ret;
}

/*
 * This function along with journal_submit_commit_record
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */

	return ret;
}

/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	/*
	 * Submit the inode data buffers. We use writepage
	 * instead of writepages because writepages can do
	 * block allocation with delalloc; we need to write
	 * only allocated blocks here.
	 */
	return generic_writepages(mapping, &wbc);
}

/* Send all the data buffers related to an inode */
int jbd2_submit_inode_data(struct jbd2_inode *jinode)
{

	if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
		return 0;

	trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
	return jbd2_journal_submit_inode_data_buffers(jinode);

}
EXPORT_SYMBOL(jbd2_submit_inode_data);

int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
{
	if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
		!jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
		return 0;
	return filemap_fdatawait_range_keep_errors(
		jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
		jinode->i_dirty_end);
}
EXPORT_SYMBOL(jbd2_wait_inode_data);

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
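		/*
		 * Pin this inode with JI_COMMIT_RUNNING so it cannot be
		 * released while we drop j_list_lock to submit its pages.
		 */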
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/* submit the inode data buffers. */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		if (journal->j_submit_inode_data_buffers) {
			err = journal->j_submit_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

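/*
 * Wait for writeback of an inode's dirty range started during commit and
 * collect any IO errors without clearing them on the mapping.
 */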
int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;

	return filemap_fdatawait_range_keep_errors(mapping,
						   jinode->i_dirty_start,
						   jinode->i_dirty_end);
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 *
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/* wait for the inode data buffers writeout. */
		if (journal->j_finish_inode_data_buffers) {
			err = journal->j_finish_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

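/*
 * Fold the contents of one buffer into the running CRC32 that goes into
 * the commit block when the v1 checksum feature is enabled.
 */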
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}

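/*
 * Record a block number in an on-disk journal tag, splitting it into low
 * and high 32-bit halves when the 64-bit feature is enabled.
 */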
static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

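/*
 * Checksum the commit sequence number together with the buffer contents
 * and store the result in the tag: 32 bits for csum3, 16 bits otherwise.
 */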
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

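	/*
	 * With v2/v3 checksums each descriptor block ends in a checksum
	 * tail, so leave room for it when packing tags into descriptors.
	 */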
	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock_io(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						REQ_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

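	/*
	 * Announce that a full commit is in progress and wait for any fast
	 * commit that is currently running to finish before going further.
	 */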
	write_lock(&journal->j_state_lock);
	journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
	while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_fc_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		write_unlock(&journal->j_state_lock);
		schedule();
		write_lock(&journal->j_state_lock);
		finish_wait(&journal->j_fc_wait, &wait);
		/*
		 * TODO: by blocking fast commits here, we are increasing
		 * fsync() latency slightly. Strictly speaking, we don't need
		 * to block fast commits until the transaction enters T_FLUSH
		 * state. So an optimization is possible where we block new fast
		 * commits here and wait for existing ones to complete
		 * just before we enter T_FLUSH. That way, the existing fast
		 * commits and this full commit can proceed in parallel.
		 */
	}
	write_unlock(&journal->j_state_lock);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	journal->j_fc_off = 0;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

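	/*
	 * Wait for all outstanding handles against this transaction to be
	 * dropped before locking it down for commit.
	 */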
	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);
	commit_transaction->t_state = T_SWITCH;
	write_unlock(&journal->j_state_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			spin_lock(&jh->b_state_lock);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			spin_unlock(&jh->b_state_lock);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear revoked flag to reflect there is no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	/*
	 * Reserved credits cannot be claimed anymore, free them
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	write_lock(&journal->j_state_lock);
	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks = commit_transaction->t_nr_buffers;
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < jbd2_journal_get_max_txn_bufs(journal))
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/* 
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

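	/*
	 * Without async commit the commit record is only written now, after
	 * all journal IO for this transaction has completed.
	 */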
	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
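	/*
	 * An asynchronously written commit block was not issued with a
	 * flush/FUA, so flush the journal device to make the whole
	 * transaction durable.
	 */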
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	WARN_ON_ONCE(
		atomic_read(&commit_transaction->t_outstanding_credits) < 0);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;
		bool drop_ref;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		spin_lock(&jh->b_state_lock);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * A buffer which has been freed while still being journaled
		 * by a previous transaction, refile the buffer to BJ_Forget of
		 * the running transaction. If the just committed transaction
		 * contains "add to orphan" operation, we can completely
		 * invalidate the buffer now. We are rather thorough in that
		 * since the buffer may be still accessible when blocksize <
		 * pagesize and it is attached to the last partial page.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			struct address_space *mapping;

			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);

			/*
			 * Block device buffers need to stay mapped all the
			 * time, so it is enough to clear buffer_jbddirty and
			 * buffer_freed bits. For the file mapping buffers (i.e.
			 * journalled data) we need to unmap buffer and clear
			 * more bits. We also need to be careful about the check
			 * because the data page mapping can get cleared under
			 * our hands. Note that if mapping == NULL, we don't
			 * need to make buffer unmapped because the page is
			 * already detached from the mapping and buffers cannot
			 * get reused.
			 */
			mapping = READ_ONCE(bh->b_page->mapping);
			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		drop_ref = __jbd2_journal_refile_buffer(jh);
		spin_unlock(&jh->b_state_lock);
		if (drop_ref)
			jbd2_journal_put_journal_head(jh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Add the transaction to the checkpoint list
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);
	if (journal->j_fc_cleanup_callback)
		journal->j_fc_cleanup_callback(journal, 1);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
	journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);
	wake_up(&journal->j_fc_wait);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}