/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

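/*
 * Check whether the blocks allocated since the last checkpoint still fit
 * within the user-visible block count; if not, there is no room to roll
 * forward safely.
 */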
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

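/* Look up an inode that has already been queued for recovery. */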
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

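/*
 * Queue @inode for recovery.  The entry takes over the caller's inode
 * reference, which is dropped again in del_fsync_inode().
 */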
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
							struct inode *inode)
{
	struct fsync_inode_entry *entry;

	entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (!entry)
		return NULL;

	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}

static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

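/*
 * Re-link a recovered inode into its parent directory.  A stale entry with
 * the same name is turned into an orphan and deleted before the new link
 * is added.
 */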
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}

		entry = add_fsync_inode(dir_list, dir);
		if (!entry) {
			err = -ENOMEM;
			iput(dir);
			goto out;
		}
	}

	dir = entry->inode;

	if (file_enc_name(inode))
		return 0;

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_add_link(dir, &name, inode,
					inode->i_ino, inode->i_mode);
	}
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

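/*
 * Bring the in-memory inode up to date with the fsynced on-disk copy:
 * i_mode, i_size and the timestamps may all be newer than what the last
 * checkpoint recorded.
 */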
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

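/*
 * An inode page found in the node chain only matches the inode we queued
 * if none of the cached timestamps are newer than the on-disk ones;
 * otherwise the page is stale and its dnodes must be skipped.
 */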
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}

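/*
 * Step #1 of roll forward recovery: walk the warm node chain written
 * after the last checkpoint and collect every inode that owns a dnode
 * carrying an fsync mark.
 */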
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct inode *inode;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(inode)) {
				err = PTR_ERR(inode);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}

			/* add this fsync inode to the list */
			entry = add_fsync_inode(head, inode);
			if (!entry) {
				err = -ENOMEM;
				iput(inode);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

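/* Drop all remaining entries along with the inode references they hold. */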
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

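/*
 * @blkaddr is about to be reused for recovered data, but an older node
 * may still index it.  Find that node through the segment summary and
 * truncate the stale index so the block is not referenced twice.
 */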
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but keep its
	 * reference count alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

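/*
 * Replay a single fsynced dnode: restore xattrs and inline data first,
 * then copy the block address array into the checkpointed node page,
 * reserving or truncating blocks as needed.
 */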
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from cold log.
		 * But we should keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if ((loff_t)(start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

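/*
 * Step #2 of roll forward recovery: walk the node chain again and, for
 * each block belonging to a collected inode, replay the inode, its
 * dentry and its data blocks.
 */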
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

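/*
 * Mount-time entry point.  Returns 1 if there is recoverable data and
 * @check_only is set, 0 on success, and a negative errno on failure.  On
 * success a checkpoint is written so the replayed updates persist.
 */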
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (test_opt(sbi, LFS)) {
			update_meta_page(sbi, NULL, blkaddr);
			invalidate = true;
		} else if (discard_next_dnode(sbi, blkaddr)) {
			invalidate = true;
		}

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}