/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

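/*
 * Return true if enough free space remains to replay the fsynced
 * blocks on top of the last checkpoint.
 */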
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

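/*
 * Find the fsync_inode_entry collected for ino, or NULL if the inode
 * has not been seen yet.
 */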
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

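/*
 * Re-link a recovered inode into its parent directory. If another inode
 * already owns the name, delete that stale dentry (reserving an orphan
 * slot first, since the victim may lose its last link) and retry.
 */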
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	if (file_enc_name(inode)) {
		iput(dir);
		return 0;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

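/*
 * Restore the inode metadata (mode, size and timestamps) recorded in
 * the fsynced node page.
 */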
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
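	/* note: atime is restored from the recorded mtime here */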
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

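/*
 * First pass of roll forward recovery: walk the warm node chain written
 * after the last checkpoint and collect an fsync_inode_entry for every
 * inode that has fsync marked dnodes to replay.
 */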
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

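/*
 * Release all entries collected by find_fsync_dnodes().
 */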
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

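/*
 * blkaddr is about to be reused for recovered data. Find the old owner
 * of the block through the segment summary and, if its data index still
 * points here, truncate that index so the block is not referenced twice.
 */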
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily, but keep
	 * its reference count held.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

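/*
 * Replay a single fsynced node page: recover xattr and inline data if
 * present, then copy each newer block address into the on-disk dnode,
 * reserving new blocks and invalidating stale owners on the way.
 */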
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct node_info ni;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from cold log.
		 * But, we keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR &&
			is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
							ni.version, false);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

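/*
 * Second pass: walk the node chain again and apply every logged page to
 * the inodes collected by find_fsync_dnodes().
 */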
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

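/*
 * Entry point of roll forward recovery: collect the fsynced inodes,
 * replay their data, and write a checkpoint if anything was recovered.
 */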
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	set_sbi_flag(sbi, SBI_POR_DOING);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

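	/* remember the first log block; it is discarded below on failure */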
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	/* truncate meta pages used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (discard_next_dnode(sbi, blkaddr))
			invalidate = true;

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}
	return err;
}