/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

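/*
 * Return true when the blocks allocated since the last checkpoint still
 * fit under the total user block count, i.e. there is room to replay
 * them during roll-forward recovery.
 */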
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

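/* Find the entry for @ino in the collected fsync inode list, if any. */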
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

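/*
 * Grab a reference to inode @ino and append a new fsync_inode_entry
 * for it to @head.  Returns the entry, or an ERR_PTR on failure.
 */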
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
					struct list_head *head, nid_t ino)
{
	struct inode *inode = f2fs_iget(sbi->sb, ino);
	struct fsync_inode_entry *entry;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (!entry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}

	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}

static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

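/*
 * Re-link @inode into its parent directory (found via i_pino) when the
 * recovered dnode carried a dentry mark.  If a stale dentry with the
 * same name points to a different inode, that inode is made an orphan
 * and its dentry deleted before retrying the link.
 */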
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

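/*
 * Scenario 1: inode(x) | CP | inode(x) | dnode(F)
 * Update the in-memory inode with the mode, size and timestamps
 * recorded in the fsynced inode page.
 */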
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

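/*
 * Return false when the in-memory inode carries newer timestamps than
 * the inode block at @ipage, so a stale on-disk inode (scenario 3:
 * inode(x) | CP | dnode(F) | inode(x)) can be skipped.
 */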
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}

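/*
 * Step #1 of roll-forward recovery: walk the warm node chain written
 * after the last checkpoint and collect every inode with an fsync mark
 * into @head.
 */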
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page));
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

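/*
 * @blkaddr is about to be reused for recovered data, but the SIT still
 * marks it valid for a pre-checkpoint owner.  Look that owner up via
 * the segment summary and truncate its stale index so the block can be
 * rewritten safely.
 */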
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

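/*
 * Replay one fsynced dnode for @inode: restore any xattr or inline
 * data it carries, then walk its data block addresses and bring the
 * current indices in line with what was written after the checkpoint.
 */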
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from the cold log.
		 * But we keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/* cast to loff_t to avoid 32-bit overflow of the byte offset */
		if ((loff_t)(start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

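/*
 * Step #2 of roll-forward recovery: walk the warm node chain again and
 * replay inode updates, dentry marks and data indices for every inode
 * collected in step #1.
 */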
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

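/*
 * Mount-time entry point for roll-forward recovery.  With @check_only
 * set, just report (by returning 1) whether there is anything to
 * recover; otherwise replay the fsynced updates and write a checkpoint
 * on success.
 */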
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (test_opt(sbi, LFS)) {
			update_meta_page(sbi, NULL, blkaddr);
			invalidate = true;
		} else if (discard_next_dnode(sbi, blkaddr)) {
			invalidate = true;
		}

		f2fs_wait_all_discard_bio(sbi);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}