/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_node *raw_node = F2FS_NODE(ipage);
	struct f2fs_inode *raw_inode = &(raw_node->i);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
	if (!dir) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
		add_dirty_dir_inode(dir);
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		kunmap(page);
		f2fs_put_page(page, 0);
		goto out;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			if (PTR_ERR(einode) == -ENOENT)
				err = -EEXIST;
			goto out;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
			"ino = %x, name = %s, dir = %lx, err = %d",
			ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

static int recover_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_node *raw_node = F2FS_NODE(node_page);
	struct f2fs_inode *raw_inode = &(raw_node->i);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}

static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return err;
}

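/*
 * Release every entry collected by find_fsync_dnodes(): drop the inode
 * reference, unlink the entry from the list, and return it to the slab cache.
 */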
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

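/*
 * blkaddr is about to be reused for recovered data. If the block is still
 * marked valid in its segment bitmap, find the previous owner through the
 * summary entry (checking the active data cursegs first, then the on-disk
 * summary block) and truncate that stale index so the block can be reused.
 */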
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino, nid;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
					le16_to_cpu(sum.ofs_in_node);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}

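/*
 * Replay one fsynced node page: for each index whose recorded block address
 * differs from the current one, reserve a new block if needed, invalidate the
 * previous owner via check_index_in_prev_nodes(), and recover the data page
 * at the destination address. The node page itself is then written in place.
 */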
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;
	int ilock;

	start = start_bidx_of_node(ofs_of_node(page), fi);
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE(fi);
	else
		end = start + ADDRS_PER_BLOCK;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		return err;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
			"recovered_data = %d blocks, err = %d",
			inode->i_ino, recovered, err);
	return err;
}

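/*
 * Walk the chain of node blocks written after the last checkpoint and replay
 * every block belonging to an inode on the fsync list with do_recover_data().
 * An entry is released once the block address recorded for it (its last
 * fsynced dnode) has been processed.
 */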
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
	unlock_page(page);
out:
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

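/*
 * Entry point for roll-forward recovery at mount time: step #1 collects the
 * inodes that were fsynced since the last checkpoint, step #2 replays their
 * data blocks, and a checkpoint is written if everything succeeded.
 */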
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (unlikely(!fsync_entry_slab))
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = 1;
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	sbi->por_doing = 0;
	if (!err)
		write_checkpoint(sbi, false);
	return err;
}