/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

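/*
 * Check whether the blocks allocated since the last checkpoint still fit
 * within the user block count, i.e. whether roll forward can proceed.
 */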
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

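/* Find the fsync_inode_entry for @ino on @head, or NULL if none exists. */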
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

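/*
 * Grab a reference on the inode of @ino and queue a new fsync_inode_entry
 * for it at the tail of @head.
 */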
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
					struct list_head *head, nid_t ino)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}

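/* Drop the held inode reference and free the list entry. */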
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

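/*
 * Re-link a recovered inode into its parent directory. A stale dentry
 * that carries the same name but points to a different inode is removed
 * through the orphan path before the link is retried.
 */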
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

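/* Restore the inode's mode, size and timestamps from its node page. */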
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

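/*
 * A non-inode page always matches; otherwise report a mismatch when the
 * in-memory inode carries newer ctime/atime/mtime than the on-disk copy.
 */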
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}

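/*
 * Recovery step #1: walk the warm node chain written after the last
 * checkpoint and collect an fsync_inode_entry for every fsync'ed dnode.
 */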
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page));
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

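/* Release every entry left on an fsync inode list. */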
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

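/*
 * @blkaddr is about to be reused for recovered data. If a previous node
 * still maps this block, find it through the segment summary and truncate
 * the stale mapping first.
 */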
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

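/*
 * Replay one fsync'ed dnode page: recover xattr and inline data first,
 * then walk the covered block addresses and rewrite each changed index.
 */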
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from cold log.
		 * But, we should remain this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if ((loff_t)(start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

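/*
 * Recovery step #2: walk the node chain again and replay inode updates,
 * dentry operations and data blocks for the inodes collected in step #1.
 */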
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

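/*
 * Entry point of roll forward recovery. When @check_only is set, only
 * report (by returning 1) whether there is anything to recover.
 */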
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (test_opt(sbi, LFS)) {
			update_meta_page(sbi, NULL, blkaddr);
			invalidate = true;
		} else if (discard_next_dnode(sbi, blkaddr)) {
			invalidate = true;
		}

		f2fs_wait_all_discard_bio(sbi);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}