/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

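/*
 * Check that the blocks allocated since the last checkpoint still fit
 * within the user-visible block count, i.e. roll-forward recovery has
 * room to replay the logged updates.
 */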
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

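/* Find the entry for @ino in the list built while scanning the log. */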
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

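/*
 * Pin the inode with f2fs_iget_retry(), initialize its quota (charging
 * a new inode against quota when @quota_inode is set) and link a new
 * fsync_inode_entry onto @head.  Returns the entry or an ERR_PTR().
 */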
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

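/* Unpin the inode and free one recovery list entry. */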
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

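/*
 * Re-link @inode into its parent directory using the name saved in the
 * fsynced inode block.  If a stale dentry still owns that name, the
 * old inode is orphaned and the dentry deleted before retrying.
 */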
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}

		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

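/*
 * Restore i_mode, i_size, timestamps and i_advise from the fsynced
 * inode block into the in-memory inode.
 */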
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

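/*
 * Walk the warm node chain written after the last checkpoint and
 * collect every inode that owns a dnode carrying the fsync mark.
 */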
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

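/* Release all entries remaining on a recovery list. */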
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

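/*
 * @blkaddr may still be addressed by a node written before the crash.
 * Find that node through the segment summaries and truncate the stale
 * index, so the block ends up owned by a single dnode.
 */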
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily, but keep
	 * its reference count alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
392
		iput(inode);
393 394 395 396 397
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

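/*
 * Replay a single fsynced dnode: recover xattr and inline data first,
 * then walk the block addresses in the node and bring each one that
 * differs from the checkpointed copy back in line with the log.
 */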
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = recover_xattr_data(inode, page, blkaddr);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check whether a previous node page still holds this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}

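/*
 * Second pass over the warm node chain: for every collected inode,
 * replay inode metadata, the logged dentry and the data block
 * addresses recorded in each fsynced dnode.
 */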
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

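/*
 * Entry point for roll-forward recovery.  When @check_only is set,
 * just report whether there is anything to recover: the return value
 * is 1 if so, 0 if not, and negative on error.
 */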
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & MS_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~MS_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= MS_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
skip:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (!err && need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		err = write_checkpoint(sbi, &cpc);
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */

	return ret ? ret : err;
}