/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

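/*
 * Check whether the blocks allocated since the last checkpoint still fit
 * within the user-visible block count, i.e. whether there is room to
 * roll forward.
 */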
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

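/* Allocate a recovery entry for @inode and add it to @head. */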
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
							struct inode *inode)
{
	struct fsync_inode_entry *entry;

	entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (!entry)
		return NULL;

	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}

static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

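/*
 * Re-link @inode into its parent directory using the name stored in its
 * inode page; an existing entry under the same name is removed first.
 */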
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}

		entry = add_fsync_inode(dir_list, dir);
		if (!entry) {
			err = -ENOMEM;
			iput(dir);
			goto out;
		}
	}

	dir = entry->inode;

	if (file_enc_name(inode))
		return 0;

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

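/* Restore the in-memory inode from the image saved in its node page. */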
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
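	/* note: i_atime is restored from the on-disk mtime fields */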
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

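/*
 * Return false if the in-memory inode carries timestamps newer than the
 * on-disk image in @ipage, i.e. the two no longer describe the same state.
 */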
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}

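/*
 * Walk the node chain written after the last checkpoint and collect the
 * inodes that have fsync'ed dnodes to recover.
 */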
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct inode *inode;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(inode)) {
				err = PTR_ERR(inode);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}

			/* add this fsync inode to the list */
			entry = add_fsync_inode(head, inode);
			if (!entry) {
				err = -ENOMEM;
				iput(inode);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

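/*
 * A still-valid block at @blkaddr may be referenced by an index in an
 * older node page; look that index up through the summary entry and
 * truncate it so the block can be reused safely.
 */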
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily, but keep its
	 * reference count alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

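/*
 * Replay one fsync'ed dnode: recover its xattrs and inline data, then
 * each data block index it covers.
 */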
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found in the cold log.
		 * But we keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if ((start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

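/*
 * Walk the node chain again and recover the data of every inode
 * collected in @inode_list.
 */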
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

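/*
 * Entry point of roll-forward recovery: collect fsync'ed inodes, recover
 * their data, and write a checkpoint if anything was recovered. With
 * check_only set, return 1 when there is recoverable fsync data.
 */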
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (discard_next_dnode(sbi, blkaddr))
			invalidate = true;

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}