log.c 28.0 KB
Newer Older
D
David Teigland 已提交
1 2
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
B
Bob Peterson 已提交
3
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
D
David Teigland 已提交
4 5 6
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
7
 * of the GNU General Public License version 2.
D
David Teigland 已提交
8 9 10 11 12 13 14
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
15
#include <linux/gfs2_ondisk.h>
16
#include <linux/crc32.h>
17
#include <linux/crc32c.h>
18
#include <linux/delay.h>
19 20
#include <linux/kthread.h>
#include <linux/freezer.h>
21
#include <linux/bio.h>
S
Steven Whitehouse 已提交
22
#include <linux/blkdev.h>
23
#include <linux/writeback.h>
B
Bob Peterson 已提交
24
#include <linux/list_sort.h>
D
David Teigland 已提交
25 26

#include "gfs2.h"
27
#include "incore.h"
D
David Teigland 已提交
28 29 30 31 32
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
33
#include "util.h"
34
#include "dir.h"
S
Steven Whitehouse 已提交
35
#include "trace_gfs2.h"
D
David Teigland 已提交
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55

/**
 * gfs2_struct2blk - compute stuff
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
S
Steven Whitehouse 已提交
56
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
D
David Teigland 已提交
57 58

	if (nstruct > first) {
59 60
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
61
		blks += DIV_ROUND_UP(nstruct - first, second);
D
David Teigland 已提交
62 63 64 65 66
	}

	return blks;
}

67 68 69 70 71
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	/* Detach from the owning transaction and from both AIL lists */
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	/* One fewer AIL buffer counted against this glock */
	atomic_dec(&bd->bd_gl->gl_ail_count);
	/* Drop the buffer_head reference the AIL was holding */
	brelse(bd->bd_bh);
}

85 86 87
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list is scanned
 * @withdraw: set to true on unrecoverable I/O error (buffer not uptodate)
 *
 * Returns: 1 if writeback was started (caller must rescan, since the
 * ail lock was dropped around generic_writepages()), 0 otherwise.
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr,
			       bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			/* I/O complete: either move to ail2 or flag an error */
			if (!buffer_uptodate(bh)) {
				gfs2_io_error_bh(sdp, bh);
				*withdraw = true;
			}
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		/* Skip buffers for a glock we just submitted writeback for */
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		/* Move to the head so the rescan does not revisit it */
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		/* Drop the spinlock around the (sleeping) writeback call */
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		/* Lock was dropped; tell the caller to restart its scan */
		return 1;
	}

	return 0;
}
138 139


140 141 142 143 144 145 146 147
/**
 * gfs2_ail1_flush - start writeback of some ail1 entries 
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	bool withdraw = false;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	/* Plug so the per-transaction submissions get merged into fewer bios */
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	/* Oldest transactions first */
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		/* Nonzero return means the ail lock was dropped: rescan */
		if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	/* An I/O error was seen by gfs2_ail1_start_one: withdraw the fs */
	if (withdraw)
		gfs2_lm_withdraw(sdp, NULL);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	/* No limits: write back everything, over the whole file range */
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	/* Plain call: "return <void expression>;" in a void function is a
	   C constraint violation (C11 6.8.6.4p1), even though gcc accepts
	   it as an extension. */
	gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the AIL transaction to scan
 * @withdraw: set to true on unrecoverable I/O error (buffer not uptodate)
 *
 * Moves every buffer whose I/O has completed from the transaction's ail1
 * list to its ail2 list.  Caller holds sd_ail_lock.
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				bool *withdraw)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/* Still under I/O (or dirty/locked): leave it on ail1 */
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh)) {
			gfs2_io_error_bh(sdp, bh);
			*withdraw = true;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}

217 218 219 220 221 222
/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: non-zero if the ail1 list is now completely empty
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;
	bool withdraw = false;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr, &withdraw);
		/* Only transactions with no newer unfinished predecessor may
		   be retired to ail2 — keeps ail2 in log order */
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	/* Withdraw outside the spinlock */
	if (withdraw)
		gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");

	return ret;
}

248 249
/* Wait for (at most) one locked ail1 buffer to complete its I/O.
 * The buffer reference is taken and sd_ail_lock dropped before sleeping,
 * so list membership may change while we wait — hence only one buffer
 * per call; callers loop via gfs2_ail1_empty(). */
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			/* Pin the bh so it can't go away once we unlock */
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
269 270 271 272 273 274 275 276

/**
 * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

277
static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
278
{
279
	struct list_head *head = &tr->tr_ail2_list;
280 281 282 283 284
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
285
		gfs2_assert(sdp, bd->bd_tr == tr);
286
		gfs2_remove_from_ail(bd);
287 288 289
	}
}

D
David Teigland 已提交
290 291
/* Free every ail2 transaction that falls between the old tail and
 * @new_tail in the (circular) journal, i.e. the region the tail is
 * about to move past. */
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);	/* interval wraps journal end */
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		/* In [old_tail, new_tail): union of halves when wrapped,
		   intersection otherwise */
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		/* Both lists must be empty before the transaction is freed */
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}

316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 * Returns the blocks to the free pool and drops the read side of the
 * flush lock taken by gfs2_log_reserve().
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	/* Free count may never exceed the journal size */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}

D
David Teigland 已提交
333 334 335 336 337
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	/* Blocks held back for flush headers; scaled for sub-4K block sizes */
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	/* Advertise demand so logd knows flushing is worthwhile */
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		/* Exclusive wait: only one reserver is woken at a time */
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	/* Lock-free claim of the blocks; retry if we raced with someone */
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
				free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	/* Held until gfs2_log_release(); blocks a concurrent log flush */
	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

S
Steven Whitehouse 已提交
419
static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
D
David Teigland 已提交
420 421 422 423 424 425 426 427 428 429 430
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and 
 * all our journaled data buffers for journaled files (e.g. files in the 
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header 
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
459 460 461
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;
462

463 464 465 466 467 468 469 470
	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}
471

472
	if (sdp->sd_log_commited_revoke > 0)
473
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
474 475 476 477 478 479 480
					  sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

D
David Teigland 已提交
481 482
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
483
	struct gfs2_trans *tr;
D
David Teigland 已提交
484 485
	unsigned int tail;

D
Dave Chinner 已提交
486
	spin_lock(&sdp->sd_ail_lock);
D
David Teigland 已提交
487

S
Steven Whitehouse 已提交
488
	if (list_empty(&sdp->sd_ail1_list)) {
D
David Teigland 已提交
489
		tail = sdp->sd_log_head;
S
Steven Whitehouse 已提交
490
	} else {
491 492 493
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
D
David Teigland 已提交
494 495
	}

D
Dave Chinner 已提交
496
	spin_unlock(&sdp->sd_ail_lock);
D
David Teigland 已提交
497 498 499 500

	return tail;
}

501
/* Advance the journal tail to @new_tail, retiring everything in between
 * and returning the reclaimed blocks to the free pool. */
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	/* How many journal blocks the tail is moving past */
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	/* Free the ail2 transactions that lived in that region */
	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}


516
/* Sleep until all in-flight log I/O (sd_log_in_flight) has completed. */
static void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		/* Re-check after arming the wait to avoid a missed wakeup */
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

531
static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
B
Bob Peterson 已提交
532
{
533
	struct gfs2_inode *ipa, *ipb;
B
Bob Peterson 已提交
534

535 536
	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);
B
Bob Peterson 已提交
537

538
	if (ipa->i_no_addr < ipb->i_no_addr)
B
Bob Peterson 已提交
539
		return -1;
540
	if (ipa->i_no_addr > ipb->i_no_addr)
B
Bob Peterson 已提交
541 542 543 544
		return 1;
	return 0;
}

545 546
/* Start writeback of all dirty pages of inodes on the ordered-write
 * list, in disk-block order.  Inodes with no pages are dropped from the
 * list; the rest are put back once their writeback has been started. */
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	/* Sort by block address so writeback is sequential on disk */
	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			/* Nothing to write: remove from the ordered list */
			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
			list_del(&ip->i_ordered);
			continue;
		}
		/* Park on a private list so the loop terminates */
		list_move(&ip->i_ordered, &written);
		/* Drop the spinlock around the (sleeping) writeback call */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	/* Put everything we wrote back for gfs2_ordered_wait() */
	list_splice(&written, &sdp->sd_log_le_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

/* Wait for the ordered writeback started by gfs2_ordered_write() to
 * complete, emptying the ordered-write list as it goes. */
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		/* The ordered bit must still have been set at this point */
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* Drop the spinlock around the (sleeping) wait */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

/* Remove @ip from the superblock's ordered-write list, if it is on it. */
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	/* GIF_ORDERED tells us whether the inode is actually listed */
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

596 597 598 599 600 601 602
/* Turn @bd into a revoke entry: detach it from its buffer_head and the
 * AIL, remember the block number, and queue it on the revoke list so the
 * next log flush writes a revoke for that block. */
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	/* Record the block number before we let go of the bh */
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	bd->bd_ops = &gfs2_revoke_lops;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	/* Glock now has log-flush work outstanding */
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

/* Opportunistically convert completed ail2 buffers into revoke entries,
 * up to the number that fit in the revoke descriptor block(s) already
 * being written.  Extra journal blocks are borrowed from the free pool
 * and returned if no revokes end up queued. */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	/* Revokes that fit in the first (descriptor) block */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	/* First pass: is there any candidate at all? (bd_list empty means
	   the buffer is not already queued as a revoke) */
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	/* Grow capacity by continuation blocks until it covers the revokes
	   already queued, then keep only the spare capacity */
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		/* Borrow a block for the revoke descriptor ... */
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	/* Nothing was queued after all: give the borrowed blocks back */
	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}

666
/**
 * gfs2_write_log_header - Write a journal log header buffer at sd_log_flush_head
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 *
 * Fills in a log header page (magic, sequence, tail, timestamps, v1 crc32
 * hash plus whole-block crc32c), submits it, and waits for the write.
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 flags, int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 addr;

	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	/* Legacy crc32 hash over the original (v1) header fields only */
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	addr = gfs2_log_bmap(sdp);
	lh->lh_addr = cpu_to_be64(addr);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	/* crc32c over the rest of the block, skipping the lh_crc field */
	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
	log_flush_wait(sdp);
}

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 *
 * Writes a header for our own journal at the current flush head and then
 * pulls the log tail forward if it moved.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	/* Must never write a header into a frozen filesystem */
	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		/* No barrier support: wait for everything ourselves and
		   drop the preflush/FUA flags */
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      flags, op_flags);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}

D
David Teigland 已提交
768
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	/* Detach the current incore transaction; it becomes this flush's
	   AIL transaction once its buffers are on disk */
	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely (state == SFS_FROZEN))
			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
	}

	/* A frozen fs must not have generated any new log data */
	if (unlikely(state == SFS_FROZEN))
		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	/* Ordered data first, then the journaled metadata */
	gfs2_ordered_write(sdp);
	lops_before_commit(sdp, tr);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
		/* Header only written to move the tail forward */
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_revoke = 0;

	/* Hand the transaction over to the AIL unless it is already empty;
	   tr is set to NULL so it is not freed below */
	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		/* Shutdown/freeze flush: drain the AIL completely first */
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp))
					break;
			}
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	/* NULL if the transaction went onto the AIL; kfree(NULL) is a no-op */
	kfree(tr);
}

862 863 864 865 866 867 868 869
/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	/* Only the transaction attached as sdp->sd_log_tr may absorb others */
	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	/* Fold the new transaction's counters into the old one */
	old->tr_num_buf_new	+= new->tr_num_buf_new;
	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
	old->tr_num_revoke	+= new->tr_num_revoke;
	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;

	/* ... and take over its buffer lists */
	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

D
David Teigland 已提交
883 884
/* Fold @tr into the incore transaction (or make it the incore
 * transaction) and return any over-reserved journal blocks to the free
 * pool, based on what the merged transaction actually needs. */
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		/* There is already an incore transaction: merge into it */
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		/* Transaction has buffers: it becomes the incore one */
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	/* What was actually set aside must cover the new requirement */
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	/* Give back the difference */
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	/* Kick logd if the journal is getting full */
	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 * Caller must guarantee the log is fully flushed (no reservations, no
 * revokes, empty AIL) — the asserts below withdraw if not.
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	/* Marks the journal clean for the next mount/recovery */
	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}

961 962
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
963 964 965
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
966 967 968 969 970
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
971 972 973 974

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

975 976
	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
977
}
978 979 980 981 982 983 984 985 986 987 988 989

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm_withdraw(sdp,
					 "GFS2: fsid=%s: error %d: "
					 "withdrawing the file system to "
					 "prevent further damage.\n",
					 sdp->sd_fsname, sdp->sd_log_error);
		}

		did_flush = false;
		/* t == 0: the periodic timeout expired — flush anyway */
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			/* Push AIL buffers to disk, wait, then retire them */
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		/* Space may have been freed: wake blocked reservers */
		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		/* Sleep until there is work, the timeout runs out, or we
		   are asked to stop */
		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}