recovery.c 15.9 KB
Newer Older
J
Jaegeuk Kim 已提交
1
/*
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

48 49 50 51
static struct kmem_cache *fsync_entry_slab;

bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
52 53 54
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
55 56 57 58 59 60 61 62 63
		return false;
	return true;
}

/*
 * Find the fsync entry for inode number @ino on list @head.
 * Returns the entry, or NULL when @ino has no entry yet.
 */
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *fi;

	list_for_each_entry(fi, head, list) {
		if (fi->inode->i_ino == ino)
			return fi;
	}

	return NULL;
}

71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92
/*
 * Allocate a zeroed fsync entry for @inode and append it to @head.
 * Returns the new entry, or NULL on allocation failure; on failure the
 * caller keeps ownership of its @inode reference.
 */
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
							struct inode *inode)
{
	struct fsync_inode_entry *fi;

	fi = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	fi->inode = inode;
	list_add_tail(&fi->list, head);
	return fi;
}

/*
 * Tear down one fsync entry: drop the inode reference taken when the
 * entry was added, unlink it from its recovery list, and free it.
 */
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

C
Chao Yu 已提交
93 94
/*
 * Re-link @inode into its parent directory using the name stored in the
 * on-disk inode image @ipage.  Parent directories opened here are cached
 * on @dir_list so later recoveries reuse them; the list owner releases
 * them afterwards.
 *
 * Returns 0 on success or a negative errno.
 */
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	/* reuse an already-opened parent dir, or open and cache it */
	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}

		entry = add_fsync_inode(dir_list, dir);
		if (!entry) {
			err = -ENOMEM;
			iput(dir);
			goto out;
		}
	}

	dir = entry->inode;

	/* build the lookup name from the raw on-disk inode */
	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;	/* dentry already points at us */

	if (de) {
		/*
		 * The name exists but points at a different inode: orphan
		 * that inode, delete its dentry, and retry the lookup.
		 */
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		/* no conflicting entry: add the link */
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

178
static void recover_inode(struct inode *inode, struct page *page)
179
{
180
	struct f2fs_inode *raw = F2FS_INODE(page);
181
	char *name;
182 183

	inode->i_mode = le16_to_cpu(raw->i_mode);
184
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
185 186 187 188 189 190
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
191

192 193 194 195 196
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

197
	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
198
			ino_of_node(page), name);
199 200
}

201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}

227 228
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
229
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
230
	struct curseg_info *curseg;
231
	struct inode *inode;
232
	struct page *page = NULL;
233 234 235 236 237
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
238
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
239 240 241 242

	while (1) {
		struct fsync_inode_entry *entry;

243
		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
244
			return 0;
245

246
		page = get_tmp_page(sbi, blkaddr);
247

248
		if (cp_ver != cpver_of_node(page))
249
			break;
250 251 252 253 254

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
255 256 257 258
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
259
			if (IS_INODE(page) && is_dent_dnode(page)) {
260 261
				err = recover_inode_page(sbi, page);
				if (err)
262
					break;
263 264
			}

265 266 267 268
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
269 270 271
			inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(inode)) {
				err = PTR_ERR(inode);
272 273
				if (err == -ENOENT) {
					err = 0;
274
					goto next;
275
				}
276
				break;
277
			}
278 279 280 281 282 283 284 285

			/* add this fsync inode to the list */
			entry = add_fsync_inode(head, inode);
			if (!entry) {
				err = -ENOMEM;
				iput(inode);
				break;
			}
286
		}
J
Jaegeuk Kim 已提交
287 288
		entry->blkaddr = blkaddr;

289 290
		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
291 292 293
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
294
		f2fs_put_page(page, 1);
295 296

		ra_meta_pages_cond(sbi, blkaddr);
297
	}
298
	f2fs_put_page(page, 1);
299 300 301
	return err;
}

302
/*
 * Release every fsync entry remaining on @head (safe iteration, since
 * del_fsync_inode() unlinks and frees each entry as we go).
 */
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

310
/*
 * Before reusing block @blkaddr as a recovery destination, find which
 * node previously referenced it (via the segment summary) and truncate
 * that stale mapping so the block is not owned twice.
 *
 * @dn holds the locked dnode currently being recovered; its lock state
 * is carefully preserved across the temporary lookups below.
 * Returns 0 on success or a negative errno.
 */
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	/* block not marked valid in its segment: nothing references it */
	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	/* not an active segment: read the summary block from disk */
	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		/* previous owner is our own inode page */
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		/* previous owner is the dnode page we already hold */
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	/* drop the extra inode ref / restore the caller's page lock */
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	/* undo the temporary lock taken above, if it was ours to take */
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

408
/*
 * Replay one fsynced node page @page (located at @blkaddr in the log)
 * onto @inode: recover xattrs, inline data, then every data-block index
 * the page carries.
 *
 * Returns 0 on success or a negative errno.
 */
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from cold log.
		 * But, we should remain this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * Fix: promote to loff_t BEFORE shifting.  start is an
		 * unsigned int, so (start + 1) << PAGE_SHIFT wrapped for
		 * block indices at or beyond 4 GiB of file data, corrupting
		 * the recovered i_size.
		 */
		if ((loff_t)(start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

C
Chao Yu 已提交
517 518
/*
 * Phase 2 of roll-forward recovery: walk the warm-node log again and,
 * for every block belonging to an inode collected on @inode_list,
 * replay inode metadata, dentries (via @dir_list), and data indices.
 * Entries are removed from @inode_list once their last logged block has
 * been replayed.
 *
 * Returns 0 on success or a negative errno.
 */
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		/* a node from an older checkpoint ends the log */
		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		/* last logged block for this inode: we are done with it */
		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

580
/*
 * Entry point for roll-forward recovery after an unclean shutdown.
 *
 * With @check_only true, only phase 1 runs and the function returns 1
 * when there is fsynced data to recover (0 when there is none).
 * Otherwise both phases run and, on success, a CP_RECOVERY checkpoint
 * is written.  Returns a negative errno on failure.
 */
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;	/* report "recovery needed" to the caller */
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		/* failure path: neutralize the partially-replayed log */
		bool invalidate = false;

		if (test_opt(sbi, LFS)) {
			update_meta_page(sbi, NULL, blkaddr);
			invalidate = true;
		} else if (discard_next_dnode(sbi, blkaddr)) {
			invalidate = true;
		}

		f2fs_wait_all_discard_bio(sbi);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		/* success: persist the recovered state with a checkpoint */
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	/* ret==1 only in the check_only case; otherwise report err */
	return ret ? ret: err;
}