/*
 * linux/fs/jbd2/revoke.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
 *
 * Copyright 2000 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal revoke routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 *
 * Revoke is the mechanism used to prevent old log records for deleted
 * metadata from being replayed on top of newer data using the same
 * blocks.  The revoke mechanism is used in two separate places:
 *
 * + Commit: during commit we write the entire list of the current
 *   transaction's revoked blocks to the journal
 *
 * + Recovery: during recovery we record the transaction ID of all
 *   revoked blocks.  If there are multiple revoke records in the log
 *   for a single block, only the last one counts, and if there is a log
 *   entry for a block beyond the last revoke, then that log entry still
 *   gets replayed.
 *
 * We can get interactions between revokes and new log data within a
 * single transaction:
 *
 * Block is revoked and then journaled:
 *   The desired end result is the journaling of the new block, so we
 *   cancel the revoke before the transaction commits.
 *
 * Block is journaled and then revoked:
 *   The revoke must take precedence over the write of the block, so we
 *   need either to cancel the journal entry or to write the revoke
 *   later in the log than the log block.  In this case, we choose the
 *   latter: journaling a block cancels any revoke record for that block
 *   in the current transaction, so any revoke for that block in the
 *   transaction must have happened after the block was journaled and so
 *   the revoke must take precedence.
 *
 * Block is revoked and then written as data:
 *   The data write is allowed to succeed, but the revoke is _not_
 *   cancelled.  We still need to prevent old log records from
 *   overwriting the new data.  We don't even need to clear the revoke
 *   bit here.
 *
 * We cache the revoke status of a buffer in the current transaction in its
 * b_state bits.  As the name says, the revokevalid flag indicates that the
 * cached revoke status of a buffer is valid and we can rely on the cached
 * status.
 *
 * Revoke information on buffers is a tri-state value:
 *
 * RevokeValid clear:	no cached revoke status, need to look it up
 * RevokeValid set, Revoked clear:
 *			buffer has not been revoked, and cancel_revoke
 *			need do nothing.
 * RevokeValid set, Revoked set:
 *			buffer has been revoked.
 *
 * Locking rules:
 * We keep two hash tables of revoke records. One hashtable belongs to the
 * running transaction (is pointed to by journal->j_revoke), the other one
 * belongs to the committing transaction. Accesses to the second hash table
 * happen only from the kjournald and no other thread touches this table.  Also
 * journal_switch_revoke_table() which switches which hashtable belongs to the
 * running and which to the committing transaction is called only from
 * kjournald. Therefore we need no locks when accessing the hashtable belonging
 * to the committing transaction.
 *
 * All users operating on the hash table belonging to the running transaction
 * have a handle to the transaction. Therefore they are safe from kjournald
 * switching hash tables under them. For operations on the lists of entries in
 * the hash table j_revoke_lock is used.
 *
 * Finally, the replay code also uses the hash tables, but at that point no
 * one else can touch them (the filesystem isn't mounted yet), so no locking
 * is needed.
 */
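
/*
 * Illustrative caller-side sketch (an assumption for documentation only, not
 * code taken from a specific filesystem; fs_clear_block_bitmap_bit() is a
 * placeholder for the filesystem's own bitmap update).  It shows the ordering
 * described above: revoke a freed metadata block before its allocation bit is
 * cleared, and rely on jbd2_journal_get_write_access(), which calls
 * jbd2_journal_cancel_revoke(), to cancel the revoke if the block is
 * reallocated and journaled again within the same transaction:
 *
 *	err = jbd2_journal_revoke(handle, blocknr, bh);
 *	if (!err)
 *		fs_clear_block_bitmap_bit(handle, blocknr);
 *
 *	// later, if the block is reused for new metadata:
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	// ...modify the buffer...
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 */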

#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/log2.h>
#endif

static struct kmem_cache *jbd2_revoke_record_cache;
static struct kmem_cache *jbd2_revoke_table_cache;

/* Each revoke record represents one single revoked block.  During
   journal replay, this involves recording the transaction ID of the
   last transaction to revoke this block. */

struct jbd2_revoke_record_s
{
	struct list_head  hash;
	tid_t		  sequence;	/* Used for recovery only */
	unsigned long long	  blocknr;
};


/* The revoke table is just a simple hash table of revoke records. */
struct jbd2_revoke_table_s
{
	/* It is conceivable that we might want a larger hash table
	 * for recovery.  Must be a power of two. */
	int		  hash_size;
	int		  hash_shift;
	struct list_head *hash_table;
};


#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
				    struct list_head *,
				    struct buffer_head **, int *,
				    struct jbd2_revoke_record_s *, int);
static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
#endif

/* Utility functions to maintain the revoke table */

/* Borrowed from buffer.c: this is a tried and tested block hash function */
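/* It folds the high 32 bits of the 64-bit block number into the low 32 bits
 * (the split ">> 31 >> 1" shift is equivalent to ">> 32"), mixes the result
 * and masks it to hash_size - 1. */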
static inline int hash(journal_t *journal, unsigned long long block)
{
	struct jbd2_revoke_table_s *table = journal->j_revoke;
	int hash_shift = table->hash_shift;
	int hash = (int)block ^ (int)((block >> 31) >> 1);

	return ((hash << (hash_shift - 6)) ^
		(hash >> 13) ^
		(hash << (hash_shift - 12))) & (table->hash_size - 1);
}

static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
			      tid_t seq)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;

repeat:
	record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
	if (!record)
		goto oom;

	record->sequence = seq;
	record->blocknr = blocknr;
	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
	spin_lock(&journal->j_revoke_lock);
	list_add(&record->hash, hash_list);
	spin_unlock(&journal->j_revoke_lock);
	return 0;

oom:
	if (!journal_oom_retry)
		return -ENOMEM;
	jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
	yield();
	goto repeat;
}

/* Find a revoke record in the journal's hash table. */

static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
						      unsigned long long blocknr)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;

	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];

	spin_lock(&journal->j_revoke_lock);
	record = (struct jbd2_revoke_record_s *) hash_list->next;
	while (&(record->hash) != hash_list) {
		if (record->blocknr == blocknr) {
			spin_unlock(&journal->j_revoke_lock);
			return record;
		}
		record = (struct jbd2_revoke_record_s *) record->hash.next;
	}
	spin_unlock(&journal->j_revoke_lock);
	return NULL;
}

void jbd2_journal_destroy_revoke_caches(void)
{
	if (jbd2_revoke_record_cache) {
		kmem_cache_destroy(jbd2_revoke_record_cache);
		jbd2_revoke_record_cache = NULL;
	}
	if (jbd2_revoke_table_cache) {
		kmem_cache_destroy(jbd2_revoke_table_cache);
		jbd2_revoke_table_cache = NULL;
	}
}

int __init jbd2_journal_init_revoke_caches(void)
{
	J_ASSERT(!jbd2_revoke_record_cache);
	J_ASSERT(!jbd2_revoke_table_cache);

	jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
	if (!jbd2_revoke_record_cache)
		goto record_cache_failure;

	jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
					     SLAB_TEMPORARY);
	if (!jbd2_revoke_table_cache)
		goto table_cache_failure;
	return 0;
table_cache_failure:
	jbd2_journal_destroy_revoke_caches();
record_cache_failure:
	return -ENOMEM;
}

static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
{
	int shift = 0;
	int tmp = hash_size;
	struct jbd2_revoke_table_s *table;

	table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
	if (!table)
		goto out;

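	/* hash_size is required to be a power of two (see
	 * jbd2_journal_init_revoke()); compute its log2 for the hash shift. */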
	while((tmp >>= 1UL) != 0UL)
		shift++;

	table->hash_size = hash_size;
	table->hash_shift = shift;
	table->hash_table =
		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
	if (!table->hash_table) {
		kmem_cache_free(jbd2_revoke_table_cache, table);
		table = NULL;
		goto out;
	}

	for (tmp = 0; tmp < hash_size; tmp++)
		INIT_LIST_HEAD(&table->hash_table[tmp]);

out:
	return table;
}

static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
{
	int i;
	struct list_head *hash_list;

	for (i = 0; i < table->hash_size; i++) {
		hash_list = &table->hash_table[i];
		J_ASSERT(list_empty(hash_list));
	}

	kfree(table->hash_table);
	kmem_cache_free(jbd2_revoke_table_cache, table);
}

/* Initialise the revoke table for a given journal to a given size. */
int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
{
	J_ASSERT(journal->j_revoke_table[0] == NULL);
	J_ASSERT(is_power_of_2(hash_size));

	journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[0])
		goto fail0;

	journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[1])
		goto fail1;

	journal->j_revoke = journal->j_revoke_table[1];

	spin_lock_init(&journal->j_revoke_lock);

	return 0;

fail1:
	jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
fail0:
	return -ENOMEM;
}

/* Destroy a journal's revoke table.  The table must already be empty! */
void jbd2_journal_destroy_revoke(journal_t *journal)
{
	journal->j_revoke = NULL;
	if (journal->j_revoke_table[0])
		jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
	if (journal->j_revoke_table[1])
		jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]);
}


#ifdef __KERNEL__

/*
 * jbd2_journal_revoke: revoke a given buffer_head from the journal.  This
 * prevents the block from being replayed during recovery if we take a
 * crash after this current transaction commits.  Any subsequent
 * metadata writes of the buffer in this transaction cancel the
 * revoke.
 *
 * Note that this call may block --- it is up to the caller to make
 * sure that there are no further calls to journal_write_metadata
 * before the revoke is complete.  In ext3, this implies calling the
 * revoke before clearing the block bitmap when we are deleting
 * metadata.
 *
 * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
 * parameter, but does _not_ forget the buffer_head if the bh was only
 * found implicitly.
 *
 * bh_in may not be a journalled buffer - it may have come off
 * the hash tables without an attached journal_head.
 *
 * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count
 * by one.
 */

int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
		   struct buffer_head *bh_in)
{
	struct buffer_head *bh = NULL;
	journal_t *journal;
	struct block_device *bdev;
	int err;

	might_sleep();
	if (bh_in)
		BUFFER_TRACE(bh_in, "enter");

	journal = handle->h_transaction->t_journal;
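	/* Make sure the revoke feature is set in the journal (setting it now
	 * if necessary) before we record a revoke. */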
	if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
		J_ASSERT (!"Cannot set revoke feature!");
		return -EINVAL;
	}

	bdev = journal->j_fs_dev;
	bh = bh_in;

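	/* If the caller did not pass a buffer_head, see whether one for this
	 * block is already hashed in the buffer cache so that it can be
	 * flagged revoked as well. */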
	if (!bh) {
		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh)
			BUFFER_TRACE(bh, "found on hash");
	}
#ifdef JBD2_EXPENSIVE_CHECKING
	else {
		struct buffer_head *bh2;

		/* If there is a different buffer_head lying around in
		 * memory anywhere... */
		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh2) {
			/* ... and it has RevokeValid status... */
			if (bh2 != bh && buffer_revokevalid(bh2))
				/* ...then it better be revoked too,
				 * since it's illegal to create a revoke
				 * record against a buffer_head which is
				 * not marked revoked --- that would
				 * risk missing a subsequent revoke
				 * cancel. */
				J_ASSERT_BH(bh2, buffer_revoked(bh2));
			put_bh(bh2);
		}
	}
#endif

	/* We really ought not ever to revoke twice in a row without
           first having the revoke cancelled: it's illegal to free a
           block twice without allocating it in between! */
	if (bh) {
		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
				 "inconsistent data on disk")) {
			if (!bh_in)
				brelse(bh);
			return -EIO;
		}
		set_buffer_revoked(bh);
		set_buffer_revokevalid(bh);
		if (bh_in) {
			BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
			jbd2_journal_forget(handle, bh_in);
		} else {
			BUFFER_TRACE(bh, "call brelse");
			__brelse(bh);
		}
	}

	jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
	err = insert_revoke_hash(journal, blocknr,
				handle->h_transaction->t_tid);
	BUFFER_TRACE(bh_in, "exit");
	return err;
}

/*
 * Cancel an outstanding revoke.  For use only internally by the
 * journaling code (called from jbd2_journal_get_write_access).
 *
 * We trust buffer_revoked() on the buffer if the buffer is already
 * being journaled: if there is no revoke pending on the buffer, then we
 * don't do anything here.
 *
 * This would break if it were possible for a buffer to be revoked and
 * discarded, and then reallocated within the same transaction.  In such
 * a case we would have lost the revoked bit, but when we arrived here
 * the second time we would still have a pending revoke to cancel.  So,
 * do not trust the Revoked bit on buffers unless RevokeValid is also
 * set.
 */
int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
	struct jbd2_revoke_record_s *record;
	journal_t *journal = handle->h_transaction->t_journal;
	int need_cancel;
	int did_revoke = 0;	/* akpm: debug */
	struct buffer_head *bh = jh2bh(jh);

	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);

	/* Is the existing Revoke bit valid?  If so, we trust it, and
	 * only perform the full cancel if the revoke bit is set.  If
	 * not, we can't trust the revoke bit, and we need to do the
	 * full search for a revoke record. */
	if (test_set_buffer_revokevalid(bh)) {
		need_cancel = test_clear_buffer_revoked(bh);
	} else {
		need_cancel = 1;
		clear_buffer_revoked(bh);
	}

	if (need_cancel) {
		record = find_revoke_record(journal, bh->b_blocknr);
		if (record) {
			jbd_debug(4, "cancelled existing revoke on "
				  "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
			spin_lock(&journal->j_revoke_lock);
			list_del(&record->hash);
			spin_unlock(&journal->j_revoke_lock);
			kmem_cache_free(jbd2_revoke_record_cache, record);
			did_revoke = 1;
		}
	}

#ifdef JBD2_EXPENSIVE_CHECKING
	/* There better not be one left behind by now! */
	record = find_revoke_record(journal, bh->b_blocknr);
	J_ASSERT_JH(jh, record == NULL);
#endif

	/* Finally, have we just cleared revoke on an unhashed
	 * buffer_head?  If so, we'd better make sure we clear the
	 * revoked status on any hashed alias too, otherwise the revoke
	 * state machine will get very upset later on. */
	if (need_cancel) {
		struct buffer_head *bh2;
		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
		if (bh2) {
			if (bh2 != bh)
				clear_buffer_revoked(bh2);
			__brelse(bh2);
		}
	}
	return did_revoke;
}

/*
 * jbd2_clear_buffer_revoked_flags clears the revoked flag of buffers in the
 * revoke table to reflect that there are no revoked buffers in the next
 * transaction which is going to be started.
 */
void jbd2_clear_buffer_revoked_flags(journal_t *journal)
{
	struct jbd2_revoke_table_s *revoke = journal->j_revoke;
	int i = 0;

	for (i = 0; i < revoke->hash_size; i++) {
		struct list_head *hash_list;
		struct list_head *list_entry;
		hash_list = &revoke->hash_table[i];

		list_for_each(list_entry, hash_list) {
			struct jbd2_revoke_record_s *record;
			struct buffer_head *bh;
			record = (struct jbd2_revoke_record_s *)list_entry;
			bh = __find_get_block(journal->j_fs_dev,
					      record->blocknr,
					      journal->j_blocksize);
			if (bh) {
				clear_buffer_revoked(bh);
				__brelse(bh);
			}
		}
	}
}

/* jbd2_journal_switch_revoke_table selects j_revoke for the next transaction;
 * we do not want to suspend any processing until all revokes are
 * written -bzzz
 */
void jbd2_journal_switch_revoke_table(journal_t *journal)
{
	int i;

	if (journal->j_revoke == journal->j_revoke_table[0])
		journal->j_revoke = journal->j_revoke_table[1];
	else
		journal->j_revoke = journal->j_revoke_table[0];

	for (i = 0; i < journal->j_revoke->hash_size; i++)
		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
}

/*
 * Write revoke records to the journal for all entries in the current
 * revoke hash, deleting the entries as we go.
 */
void jbd2_journal_write_revoke_records(journal_t *journal,
				       transaction_t *transaction,
				       struct list_head *log_bufs,
				       int write_op)
{
	struct buffer_head *descriptor;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;
	struct list_head *hash_list;
	int i, offset, count;

	descriptor = NULL;
	offset = 0;
	count = 0;

	/* select revoke table for committing transaction */
	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
		journal->j_revoke_table[1] : journal->j_revoke_table[0];

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];

		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s *)
				hash_list->next;
			write_one_revoke_record(journal, transaction, log_bufs,
						&descriptor, &offset,
						record, write_op);
			count++;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
	if (descriptor)
		flush_descriptor(journal, descriptor, offset, write_op);
	jbd_debug(1, "Wrote %d revoke records\n", count);
}

/*
 * Write out one revoke record.  We need to create a new descriptor
 * block if the old one is full or if we have not already created one.
 */

static void write_one_revoke_record(journal_t *journal,
				    transaction_t *transaction,
				    struct list_head *log_bufs,
				    struct buffer_head **descriptorp,
				    int *offsetp,
				    struct jbd2_revoke_record_s *record,
				    int write_op)
{
	int csum_size = 0;
	struct buffer_head *descriptor;
	int offset;
	journal_header_t *header;

	/* If we are already aborting, this all becomes a noop.  We
           still need to go round the loop in
           jbd2_journal_write_revoke_records in order to free all of the
           revoke records: only the IO to the journal is omitted. */
	if (is_journal_aborted(journal))
		return;

	descriptor = *descriptorp;
	offset = *offsetp;

	/* Do we need to leave space at the end for a checksum? */
	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_revoke_tail);

	/* Make sure we have a descriptor with space left for the record */
	if (descriptor) {
		if (offset >= journal->j_blocksize - csum_size) {
			flush_descriptor(journal, descriptor, offset, write_op);
			descriptor = NULL;
		}
	}

	if (!descriptor) {
		descriptor = jbd2_journal_get_descriptor_buffer(journal);
		if (!descriptor)
			return;
		header = (journal_header_t *)descriptor->b_data;
		header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
		header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
		header->h_sequence  = cpu_to_be32(transaction->t_tid);

		/* Record it so that we can wait for IO completion later */
		BUFFER_TRACE(descriptor, "file in log_bufs");
		jbd2_file_log_bh(log_bufs, descriptor);

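		/* Revoked block numbers are packed immediately after the
		 * revoke block's descriptor header. */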
		offset = sizeof(jbd2_journal_revoke_header_t);
		*descriptorp = descriptor;
	}

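	/* The record itself is just the block number: 8 bytes when the 64BIT
	 * incompat feature is set, 4 bytes otherwise. */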
	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
		* ((__be64 *)(&descriptor->b_data[offset])) =
			cpu_to_be64(record->blocknr);
		offset += 8;

	} else {
		* ((__be32 *)(&descriptor->b_data[offset])) =
			cpu_to_be32(record->blocknr);
		offset += 4;
	}

	*offsetp = offset;
}

static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct jbd2_journal_revoke_tail *tail;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
			sizeof(struct jbd2_journal_revoke_tail));
	tail->r_checksum = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	tail->r_checksum = cpu_to_be32(csum);
}

/*
 * Flush a revoke descriptor out to the journal.  If we are aborting,
 * this is a noop; otherwise we are generating a buffer which needs to
 * be waited for during commit, so it has to go onto the appropriate
 * journal buffer list.
 */

static void flush_descriptor(journal_t *journal,
			     struct buffer_head *descriptor,
			     int offset, int write_op)
{
	jbd2_journal_revoke_header_t *header;

	if (is_journal_aborted(journal)) {
		put_bh(descriptor);
		return;
	}

	header = (jbd2_journal_revoke_header_t *)descriptor->b_data;
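	/* r_count is the number of bytes of revoke data in this block,
	 * including the revoke header itself. */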
	header->r_count = cpu_to_be32(offset);
	jbd2_revoke_csum_set(journal, descriptor);

	set_buffer_jwrite(descriptor);
	BUFFER_TRACE(descriptor, "write");
	set_buffer_dirty(descriptor);
	write_dirty_buffer(descriptor, write_op);
}
#endif

/*
 * Revoke support for recovery.
 *
 * Recovery needs to be able to:
 *
 *  record all revoke records, including the tid of the latest instance
 *  of each revoke in the journal
 *
 *  check whether a given block in a given transaction should be replayed
 *  (i.e. has not been revoked by a revoke record in that or a subsequent
 *  transaction)
 *
 *  empty the revoke table after recovery.
 */

/*
 * First, setting revoke records.  We create a new revoke record for
 * every block ever revoked in the log as we scan it for recovery, and
 * we update the existing records if we find multiple revokes for a
 * single block.
 */

int jbd2_journal_set_revoke(journal_t *journal,
		       unsigned long long blocknr,
		       tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (record) {
		/* If we have multiple occurrences, only record the
		 * latest sequence number in the hashed record */
		if (tid_gt(sequence, record->sequence))
			record->sequence = sequence;
		return 0;
	}
	return insert_revoke_hash(journal, blocknr, sequence);
}

/*
 * Test revoke records.  For a given block referenced in the log, has
 * that block been revoked?  A revoke record with a given transaction
 * sequence number revokes all blocks in that transaction and earlier
 * ones, but later transactions still need to be replayed.
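 *
 * For example: a revoke record recorded with sequence 10 suppresses replay
 * of that block from transactions 10 and older, while a copy of the block
 * logged in transaction 11 is still replayed.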
 */

int jbd2_journal_test_revoke(journal_t *journal,
			unsigned long long blocknr,
			tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (!record)
		return 0;
	if (tid_gt(sequence, record->sequence))
		return 0;
	return 1;
}

/*
 * Finally, once recovery is over, we need to clear the revoke table so
 * that it can be reused by the running filesystem.
 */

void jbd2_journal_clear_revoke(journal_t *journal)
{
	int i;
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;

	revoke = journal->j_revoke;

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];
		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s*) hash_list->next;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
}