/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

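/* Is there enough free space left to allow roll-forward recovery? */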
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

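/* Find the recovery list entry for @ino, or NULL if it is not listed. */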
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

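/*
 * Re-link a recovered inode into its parent directory: an existing entry
 * under the same name that points to a different inode is removed (the old
 * inode goes through orphan handling) before the link is added again.
 */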
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_node *raw_node = F2FS_NODE(ipage);
	struct f2fs_inode *raw_inode = &(raw_node->i);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
	if (!dir) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
		add_dirty_dir_inode(dir);
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			if (PTR_ERR(einode) == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	goto out;

out_unmap_put:
	kunmap(page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
			"ino = %x, name = %s, dir = %lx, err = %d",
			ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

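/*
 * Restore the basic metadata (mode, size, timestamps) recorded in the
 * fsynced inode block, and recover the dentry as well if this node
 * carries one.
 */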
static int recover_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_node *raw_node = F2FS_NODE(node_page);
	struct f2fs_inode *raw_inode = &(raw_node->i);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}

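/*
 * Step #1 of roll-forward recovery: follow the chain of node blocks written
 * since the last checkpoint and collect every fsynced inode into @head.
 */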
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

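/*
 * @blkaddr may still be claimed by an older node block. If it is, locate
 * the owning node and truncate the stale block index so the address can be
 * safely reused by the recovered data.
 */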
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino, nid;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
					le16_to_cpu(sum.ofs_in_node);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}

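/*
 * Replay the block addresses recorded in the fsynced node @page into the
 * inode's current dnode, then rewrite the node block in place.
 */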
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	start = start_bidx_of_node(ofs_of_node(page), fi);
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE(fi);
	else
		end = start + ADDRS_PER_BLOCK;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(ni.ino != ino_of_node(page));
	f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
			"recovered_data = %d blocks, err = %d",
			inode->i_ino, recovered, err);
	return err;
}

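/*
 * Step #2 of roll-forward recovery: walk the node chain again and replay
 * the data of every inode collected in step #1.
 */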
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

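/*
 * Entry point of roll-forward recovery at mount time: collect the fsynced
 * inodes, recover their data, and write a checkpoint if anything was
 * recovered.
 */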
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = true;
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	f2fs_bug_on(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	sbi->por_doing = false;
	if (!err && need_writecp)
		write_checkpoint(sbi, false);
	return err;
}