// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>

/*
 * True while the free-nid build path holds nm_i->build_lock.
 * Fix: the macro parameter is named "nmi" but the body referenced the
 * caller-scope identifier "nm_i" (an unhygienic macro that only worked
 * because every caller happened to pass a variable named nm_i); use the
 * parameter so the macro is correct for any argument.
 */
#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

/* slab caches for NAT entries, free nids, NAT sets and fsync-node entries */
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
C
Chao Yu 已提交
33
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34 35 36
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
37 38
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
39
		return -EFSCORRUPTED;
40 41 42 43
	}
	return 0;
}

C
Chao Yu 已提交
44
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
45
{
46
	struct f2fs_nm_info *nm_i = NM_I(sbi);
47
	struct sysinfo val;
48
	unsigned long avail_ram;
49
	unsigned long mem_size = 0;
50
	bool res = false;
51 52

	si_meminfo(&val);
53 54 55 56

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

57 58 59
	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each components respectively
	 */
60
	if (type == FREE_NIDS) {
C
Chao Yu 已提交
61
		mem_size = (nm_i->nid_cnt[FREE_NID] *
C
Chao Yu 已提交
62
				sizeof(struct free_nid)) >> PAGE_SHIFT;
63
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
64
	} else if (type == NAT_ENTRIES) {
65
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
66
							PAGE_SHIFT;
67
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
68 69
		if (excess_cached_nats(sbi))
			res = false;
70 71 72 73 74
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
75 76 77
	} else if (type == INO_ENTRIES) {
		int i;

C
Chao Yu 已提交
78
		for (i = 0; i < MAX_INO_ENTRY; i++)
79 80 81
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
82
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
83
	} else if (type == EXTENT_CACHE) {
84 85
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
86
				atomic_read(&sbi->total_ext_node) *
87
				sizeof(struct extent_node)) >> PAGE_SHIFT;
88
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
J
Jaegeuk Kim 已提交
89 90 91 92
	} else if (type == INMEM_PAGES) {
		/* it allows 20% / total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
93
	} else {
94 95
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
96 97
	}
	return res;
98 99
}

J
Jaegeuk Kim 已提交
100 101 102
static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
M
Matthew Wilcox 已提交
103
		f2fs_clear_page_cache_dirty_tag(page);
J
Jaegeuk Kim 已提交
104
		clear_page_dirty_for_io(page);
105
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
J
Jaegeuk Kim 已提交
106 107 108 109 110 111
	}
	ClearPageUptodate(page);
}

/* Read (locked, nofail) the meta page holding the current NAT block of @nid. */
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}

/*
 * Copy the current NAT block of @nid into its alternate (next) location,
 * mark the copy dirty, and flip the NAT version bit.  Returns the dirty
 * destination page, or an ERR_PTR from reading the source page.
 */
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	memcpy(page_address(dst_page), page_address(src_page), PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

144 145 146 147 148
static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
C
Chao Yu 已提交
149
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
150
	else
C
Chao Yu 已提交
151
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

/* Return a nat_entry to its slab cache. */
static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/*
 * Insert @ne into the NAT radix tree and LRU list, optionally filling it
 * from an on-disk entry.  Returns @ne, or NULL when a non-nofail radix
 * insert fails.  Must be called with nat_tree_lock held.
 */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt++;
	return ne;
}

J
Jaegeuk Kim 已提交
184 185
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
186 187 188 189 190 191 192 193 194 195 196 197 198
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for recent accessed nat entry, move it to tail of lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
J
Jaegeuk Kim 已提交
199 200 201 202 203 204 205 206 207 208 209 210
}

/* Gang-lookup up to @nr cached NAT entries starting at @start into @ep. */
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

/* Remove @e from the NAT radix tree, drop the count, and free it. */
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	__free_nat_entry(e);
}

214 215
static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
216 217 218 219 220 221
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
222
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
223 224 225 226 227

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
228
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
229
	}
230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in below condition:
	 * 1. update NEW_ADDR to valid block address;
	 * 2. update old block address to new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);
252 253 254 255

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

256 257
	nm_i->dirty_nat_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
258
refresh_list:
259
	spin_lock(&nm_i->nat_list_lock);
260
	if (new_ne)
261 262 263
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
264
	spin_unlock(&nm_i->nat_list_lock);
265 266 267
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
268
		struct nat_entry_set *set, struct nat_entry *ne)
269
{
270
	spin_lock(&nm_i->nat_list_lock);
271
	list_move_tail(&ne->list, &nm_i->nat_entries);
272 273
	spin_unlock(&nm_i->nat_list_lock);

274 275 276
	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
277 278 279 280 281 282 283 284 285
}

/* Gang-lookup up to @nr dirty NAT sets starting at @start into @ep. */
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

/* Initialize the per-sb fsync node tracking state. */
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

/*
 * Pin @page on the fsync node list and hand back the sequence id
 * assigned to it.  Takes a page reference that is dropped on deletion.
 */
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *entry;
	unsigned long flags;
	unsigned int seq_id;

	entry = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	entry->page = page;
	INIT_LIST_HEAD(&entry->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&entry->list, &sbi->fsync_node_list);
	entry->seq_id = sbi->fsync_seg_id++;
	seq_id = entry->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

/*
 * Unlink @page's entry from the fsync node list and drop its reference.
 * The page must be on the list; hitting the end is a bug.
 */
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *cur;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(cur, &sbi->fsync_node_list, list) {
		if (cur->page != page)
			continue;
		list_del(&cur->list);
		sbi->fsync_node_num--;
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
		kmem_cache_free(fsync_node_entry_slab, cur);
		put_page(page);
		return;
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

/* Restart fsync sequence-id numbering from zero. */
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

C
Chao Yu 已提交
352
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
J
Jaegeuk Kim 已提交
353 354 355
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
J
Jaegeuk Kim 已提交
356
	bool need = false;
J
Jaegeuk Kim 已提交
357

358
	down_read(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
359
	e = __lookup_nat_cache(nm_i, nid);
J
Jaegeuk Kim 已提交
360 361 362 363 364
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
365
	up_read(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
366
	return need;
J
Jaegeuk Kim 已提交
367 368
}

C
Chao Yu 已提交
369
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
370 371 372
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
J
Jaegeuk Kim 已提交
373
	bool is_cp = true;
374

375
	down_read(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
376 377 378
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
379
	up_read(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
380
	return is_cp;
381 382
}

C
Chao Yu 已提交
383
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
384 385 386
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
387
	bool need_update = true;
388

389
	down_read(&nm_i->nat_tree_lock);
390 391 392 393 394
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
395
	up_read(&nm_i->nat_tree_lock);
396
	return need_update;
397 398
}

399
/* must be locked by nat_tree_lock */
400
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
J
Jaegeuk Kim 已提交
401 402
						struct f2fs_nat_entry *ne)
{
403
	struct f2fs_nm_info *nm_i = NM_I(sbi);
404
	struct nat_entry *new, *e;
405

406 407 408 409 410
	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
411
	e = __lookup_nat_cache(nm_i, nid);
412 413 414
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
E
Eric Biggers 已提交
415 416 417
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
418
				nat_get_version(e) != ne->version);
419 420 421
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
J
Jaegeuk Kim 已提交
422 423 424
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
425
			block_t new_blkaddr, bool fsync_done)
J
Jaegeuk Kim 已提交
426 427 428
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
429
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);
430

431
	down_write(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
432 433
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
434
		e = __init_nat_entry(nm_i, new, NULL, true);
435
		copy_node_info(&e->ni, ni);
436
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
J
Jaegeuk Kim 已提交
437 438 439 440 441 442
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * previous nat entry can be remained in nat cache.
		 * So, reinitialize it with new information.
		 */
443
		copy_node_info(&e->ni, ni);
444
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
J
Jaegeuk Kim 已提交
445
	}
446 447 448
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);
J
Jaegeuk Kim 已提交
449 450

	/* sanity check */
451 452
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
J
Jaegeuk Kim 已提交
453
			new_blkaddr == NULL_ADDR);
454
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
J
Jaegeuk Kim 已提交
455
			new_blkaddr == NEW_ADDR);
C
Chao Yu 已提交
456
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
J
Jaegeuk Kim 已提交
457 458
			new_blkaddr == NEW_ADDR);

A
arter97 已提交
459
	/* increment version no as node is removed */
J
Jaegeuk Kim 已提交
460 461 462 463 464 465 466
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
C
Chao Yu 已提交
467
	if (!__is_valid_data_blkaddr(new_blkaddr))
468
		set_nat_flag(e, IS_CHECKPOINTED, false);
J
Jaegeuk Kim 已提交
469
	__set_nat_cache_dirty(nm_i, e);
470 471

	/* update fsync_mark if its inode nat entry is still alive */
472 473
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
474 475 476 477 478
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
479
	up_write(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
480 481
}

C
Chao Yu 已提交
482
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
J
Jaegeuk Kim 已提交
483 484
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
J
Jaegeuk Kim 已提交
485
	int nr = nr_shrink;
J
Jaegeuk Kim 已提交
486

487 488
	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;
J
Jaegeuk Kim 已提交
489

490 491
	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
J
Jaegeuk Kim 已提交
492
		struct nat_entry *ne;
493 494 495 496

		if (list_empty(&nm_i->nat_entries))
			break;

J
Jaegeuk Kim 已提交
497 498
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
499 500 501
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

J
Jaegeuk Kim 已提交
502 503
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
504 505

		spin_lock(&nm_i->nat_list_lock);
J
Jaegeuk Kim 已提交
506
	}
507 508
	spin_unlock(&nm_i->nat_list_lock);

509
	up_write(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
510
	return nr - nr_shrink;
J
Jaegeuk Kim 已提交
511 512
}

513
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
C
Chao Yu 已提交
514
						struct node_info *ni)
J
Jaegeuk Kim 已提交
515 516 517
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
518
	struct f2fs_journal *journal = curseg->journal;
J
Jaegeuk Kim 已提交
519 520 521 522 523
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
524
	pgoff_t index;
C
Chao Yu 已提交
525
	block_t blkaddr;
J
Jaegeuk Kim 已提交
526 527 528 529 530
	int i;

	ni->nid = nid;

	/* Check nat cache */
531
	down_read(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
532 533 534 535 536
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
537
		up_read(&nm_i->nat_tree_lock);
538
		return 0;
539
	}
J
Jaegeuk Kim 已提交
540

541 542
	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

J
Jaegeuk Kim 已提交
543
	/* Check current segment summary */
544
	down_read(&curseg->journal_rwsem);
C
Chao Yu 已提交
545
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
J
Jaegeuk Kim 已提交
546
	if (i >= 0) {
547
		ne = nat_in_journal(journal, i);
J
Jaegeuk Kim 已提交
548 549
		node_info_from_raw_nat(ni, &ne);
	}
550
	up_read(&curseg->journal_rwsem);
551 552
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
553
		goto cache;
554
	}
J
Jaegeuk Kim 已提交
555 556

	/* Fill node_info from nat page */
557 558 559
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

C
Chao Yu 已提交
560
	page = f2fs_get_meta_page(sbi, index);
561 562 563
	if (IS_ERR(page))
		return PTR_ERR(page);

J
Jaegeuk Kim 已提交
564 565 566 567 568
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
C
Chao Yu 已提交
569 570 571 572 573
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

J
Jaegeuk Kim 已提交
574
	/* cache nat entry */
575
	cache_nat_entry(sbi, nid, &ne);
576
	return 0;
J
Jaegeuk Kim 已提交
577 578
}

579 580 581
/*
 * readahead MAX_RA_NODE number of node pages.
 */
C
Chao Yu 已提交
582
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
583 584 585 586 587 588 589 590 591 592 593 594 595
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
C
Chao Yu 已提交
596
		f2fs_ra_node_page(sbi, nid);
597 598 599 600 601
	}

	blk_finish_plug(&plug);
}

C
Chao Yu 已提交
602
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
603 604
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
605 606 607
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
608 609 610 611 612 613 614 615 616 617 618 619 620
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
621
		/* fall through */
622 623
	case 2:
		base += 2 * direct_blks;
624
		/* fall through */
625 626 627 628 629 630 631 632 633 634
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

J
Jaegeuk Kim 已提交
635
/*
J
Jaegeuk Kim 已提交
636 637 638
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
639
static int get_node_path(struct inode *inode, long block,
640
				int offset[4], unsigned int noffset[4])
J
Jaegeuk Kim 已提交
641
{
642
	const long direct_index = ADDRS_PER_INODE(inode);
643
	const long direct_blks = ADDRS_PER_BLOCK(inode);
J
Jaegeuk Kim 已提交
644
	const long dptrs_per_blk = NIDS_PER_BLOCK;
645
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
J
Jaegeuk Kim 已提交
646 647 648 649 650 651 652
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
653
		offset[n] = block;
J
Jaegeuk Kim 已提交
654 655 656 657 658 659
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
660
		offset[n] = block;
J
Jaegeuk Kim 已提交
661 662 663 664 665 666 667
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
668
		offset[n] = block;
J
Jaegeuk Kim 已提交
669 670 671 672 673 674 675 676 677
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
678
		offset[n] = block % direct_blks;
J
Jaegeuk Kim 已提交
679 680 681 682 683 684 685 686 687
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
688
		offset[n] = block % direct_blks;
J
Jaegeuk Kim 已提交
689 690 691 692 693 694 695 696 697 698 699 700 701 702
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
703
		offset[n] = block % direct_blks;
J
Jaegeuk Kim 已提交
704 705 706
		level = 3;
		goto got;
	} else {
707
		return -E2BIG;
J
Jaegeuk Kim 已提交
708 709 710 711 712 713 714
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
C
Chao Yu 已提交
715
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
C
Chao Yu 已提交
716
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
J
Jaegeuk Kim 已提交
717
 */
C
Chao Yu 已提交
718
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
J
Jaegeuk Kim 已提交
719
{
720
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
J
Jaegeuk Kim 已提交
721
	struct page *npage[4];
722
	struct page *parent = NULL;
J
Jaegeuk Kim 已提交
723 724 725
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
726
	int level, i = 0;
J
Jaegeuk Kim 已提交
727 728
	int err = 0;

729
	level = get_node_path(dn->inode, index, offset, noffset);
730 731
	if (level < 0)
		return level;
J
Jaegeuk Kim 已提交
732 733

	nids[0] = dn->inode->i_ino;
734
	npage[0] = dn->inode_page;
J
Jaegeuk Kim 已提交
735

736
	if (!npage[0]) {
C
Chao Yu 已提交
737
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
738 739 740
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
741 742 743

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
744
		err = -ENOENT;
745 746 747 748
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

J
Jaegeuk Kim 已提交
749
	parent = npage[0];
750 751
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
J
Jaegeuk Kim 已提交
752 753 754 755 756 757 758
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

759
		if (!nids[i] && mode == ALLOC_NODE) {
J
Jaegeuk Kim 已提交
760
			/* alloc new node */
C
Chao Yu 已提交
761
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
J
Jaegeuk Kim 已提交
762 763 764 765 766
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
C
Chao Yu 已提交
767
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
J
Jaegeuk Kim 已提交
768
			if (IS_ERR(npage[i])) {
C
Chao Yu 已提交
769
				f2fs_alloc_nid_failed(sbi, nids[i]);
J
Jaegeuk Kim 已提交
770 771 772 773 774
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
C
Chao Yu 已提交
775
			f2fs_alloc_nid_done(sbi, nids[i]);
J
Jaegeuk Kim 已提交
776
			done = true;
777
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
C
Chao Yu 已提交
778
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
J
Jaegeuk Kim 已提交
779 780 781 782 783 784 785 786 787 788 789 790 791 792
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
C
Chao Yu 已提交
793
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
J
Jaegeuk Kim 已提交
794 795 796 797 798 799 800 801 802 803 804 805 806 807
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
808
	dn->data_blkaddr = f2fs_data_blkaddr(dn);
J
Jaegeuk Kim 已提交
809 810 811 812 813 814 815 816 817
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
818 819 820
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
821
		dn->ofs_in_node = offset[level];
822
	}
J
Jaegeuk Kim 已提交
823 824 825
	return err;
}

826
static int truncate_node(struct dnode_of_data *dn)
J
Jaegeuk Kim 已提交
827
{
828
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
J
Jaegeuk Kim 已提交
829
	struct node_info ni;
830
	int err;
P
Pan Bian 已提交
831
	pgoff_t index;
J
Jaegeuk Kim 已提交
832

833 834 835
	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;
J
Jaegeuk Kim 已提交
836 837

	/* Deallocate node address */
C
Chao Yu 已提交
838
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
839
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
840
	set_node_addr(sbi, &ni, NULL_ADDR, false);
J
Jaegeuk Kim 已提交
841 842

	if (dn->nid == dn->inode->i_ino) {
C
Chao Yu 已提交
843
		f2fs_remove_orphan_inode(sbi, dn->nid);
J
Jaegeuk Kim 已提交
844
		dec_valid_inode_count(sbi);
845
		f2fs_inode_synced(dn->inode);
J
Jaegeuk Kim 已提交
846
	}
847

J
Jaegeuk Kim 已提交
848
	clear_node_page_dirty(dn->node_page);
849
	set_sbi_flag(sbi, SBI_IS_DIRTY);
J
Jaegeuk Kim 已提交
850

P
Pan Bian 已提交
851
	index = dn->node_page->index;
J
Jaegeuk Kim 已提交
852
	f2fs_put_page(dn->node_page, 1);
853 854

	invalidate_mapping_pages(NODE_MAPPING(sbi),
P
Pan Bian 已提交
855
			index, index);
856

J
Jaegeuk Kim 已提交
857
	dn->node_page = NULL;
858
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
859 860

	return 0;
J
Jaegeuk Kim 已提交
861 862 863 864 865
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
866
	int err;
J
Jaegeuk Kim 已提交
867 868 869 870 871

	if (dn->nid == 0)
		return 1;

	/* get direct node */
C
Chao Yu 已提交
872
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
873
	if (PTR_ERR(page) == -ENOENT)
J
Jaegeuk Kim 已提交
874 875 876 877 878 879 880
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
C
Chao Yu 已提交
881
	f2fs_truncate_data_blocks(dn);
882 883 884 885
	err = truncate_node(dn);
	if (err)
		return err;

J
Jaegeuk Kim 已提交
886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

903 904
	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

C
Chao Yu 已提交
905
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
906 907
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
J
Jaegeuk Kim 已提交
908
		return PTR_ERR(page);
909
	}
J
Jaegeuk Kim 已提交
910

C
Chao Yu 已提交
911
	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
912

913
	rn = F2FS_NODE(page);
J
Jaegeuk Kim 已提交
914 915 916 917 918 919 920 921 922
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
923 924
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
J
Jaegeuk Kim 已提交
925 926 927 928 929 930 931 932 933 934 935 936
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
937 938
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
J
Jaegeuk Kim 已提交
939 940 941 942 943 944 945 946 947 948 949
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
950 951 952
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
J
Jaegeuk Kim 已提交
953 954 955 956
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
957
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
J
Jaegeuk Kim 已提交
958 959 960 961
	return freed;

out_err:
	f2fs_put_page(page, 1);
962
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
J
Jaegeuk Kim 已提交
963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
981
	for (i = 0; i < idx + 1; i++) {
A
arter97 已提交
982
		/* reference count'll be increased */
C
Chao Yu 已提交
983
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
J
Jaegeuk Kim 已提交
984 985
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
986
			idx = i - 1;
J
Jaegeuk Kim 已提交
987 988 989 990 991
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

C
Chao Yu 已提交
992
	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
993

J
Jaegeuk Kim 已提交
994
	/* free direct nodes linked to a partial indirect node */
995
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
J
Jaegeuk Kim 已提交
996 997 998 999 1000 1001 1002
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
1003 1004
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
J
Jaegeuk Kim 已提交
1005 1006
	}

1007
	if (offset[idx + 1] == 0) {
J
Jaegeuk Kim 已提交
1008 1009
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
1010 1011 1012
		err = truncate_node(dn);
		if (err)
			goto fail;
J
Jaegeuk Kim 已提交
1013 1014 1015 1016
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
1017 1018
	offset[idx + 1] = 0;
	idx--;
J
Jaegeuk Kim 已提交
1019
fail:
1020
	for (i = idx; i >= 0; i--)
J
Jaegeuk Kim 已提交
1021
		f2fs_put_page(pages[i], 1);
1022 1023 1024

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

J
Jaegeuk Kim 已提交
1025 1026 1027
	return err;
}

J
Jaegeuk Kim 已提交
1028
/*
 * All the block addresses of data and nodes should be nullified.
 *
 * Truncate every node block of @inode at or beyond file offset @from:
 * first handle a partially-covered indirect path (truncate_partial_nodes),
 * then drop whole direct/indirect/double-indirect subtrees in i_nid order,
 * zeroing each i_nid slot in the on-disk inode as its subtree disappears.
 * Returns 0 on success or a negative errno.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0)
		return level;

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	/* keep a reference to the inode page but drop its lock for the walk */
	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	/* free the remaining whole subtrees, one i_nid slot at a time */
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;	/* last slot: stop after this one */
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			/* whole subtree gone: clear its slot in the inode */
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		/* truncate_nodes() returns the count of freed nodes on success */
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

1126
/* caller must lock inode page */
/*
 * Free the dedicated xattr node block of @inode, if it has one, and clear
 * i_xattr_nid.  Returns 0 on success (including when there is no xattr
 * node) or a negative errno.
 */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	/* truncate_node() consumes the npage reference on success */
	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

1154
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 *
 * Remove the on-disk inode page of @inode along with its xattr node and
 * any inline data block.  Flags the filesystem for fsck if i_blocks is
 * inconsistent.  Returns 0 on success or a negative errno.
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	/* i_blocks should be 0 (failed new inode) or 8 (inode block only) */
	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

C
Chao Yu 已提交
1201
/*
 * Allocate the node page that holds the on-disk inode of a newly created
 * @inode (node offset 0).  Returns the locked page or an ERR_PTR.
 */
struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

C
Chao Yu 已提交
1212
/*
 * Allocate a new node page for dn->nid at node offset @ofs (0 means the
 * inode page itself), charge the valid-node quota, register the nid at
 * NEW_ADDR in the NAT cache, and return the locked, dirty page.
 * Returns an ERR_PTR on failure.
 */
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* reserve quota before touching any metadata */
	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	/* debug build: verify the nid really is unallocated in the NAT */
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	/* a new xattr block must be recorded in the inode immediately */
	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

1265 1266 1267
/*
 * Caller should do after getting the following values.
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 *
 * Submit a read bio for the locked node @page.  If the page is already
 * uptodate only its inode checksum is re-verified.  On a successful
 * submission the page is unlocked by bio completion, hence the put-page
 * contract above.
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	/* node pages are indexed by nid, so page->index is the nid */
	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

J
Jaegeuk Kim 已提交
1312
/*
 * Readahead a node page
 *
 * Best effort: silently returns when @nid is 0 or out of range, when the
 * page is already cached, or when the cache page cannot be grabbed.
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	/* lockless peek: skip the grab if the page is already cached */
	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	/* drop the lock ref only if the read was actually submitted */
	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

J
Jaegeuk Kim 已提交
1337
/*
 * Read the node page for @nid and return it locked and verified.
 * When @parent is given, readahead MAX_RA_NODE sibling nids starting at
 * @start + 1 while waiting for the read.  Returns an ERR_PTR on failure.
 */
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		/* already uptodate and checksum-verified; still locked */
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	/* page was truncated from the mapping while unlocked: retry */
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	/* footer must agree with the nid we asked for */
	if(unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

C
Chao Yu 已提交
1395
/* Get the node page for @nid without parent-based readahead. */
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

C
Chao Yu 已提交
1400
struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1401 1402 1403 1404 1405 1406 1407
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

1408 1409 1410 1411
/*
 * Write back the inline data of the in-memory inode @ino, if any.
 * Best effort: silently returns when the inode is not cached or its
 * first page cannot be locked without blocking.
 */
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	/* inline data lives in data page 0; don't block on its lock */
	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	/* on failure keep the page dirty so it is retried later */
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

1444 1445
/*
 * Scan the dirty node pages of @ino and return a reference to the last
 * dirty direct (cold) node page, or NULL if there is none, or
 * ERR_PTR(-EIO) on checkpoint error.  The caller owns the returned
 * page reference (get_page) but not its lock.
 */
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			/* only direct nodes of this inode are of interest */
			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			/* re-check under the page lock */
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* keep only the most recently found candidate */
			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

1500
/*
 * Write back one locked dirty node @page.
 * @atomic:     part of an atomic fsync; adds PREFLUSH/FUA unless NOBARRIER.
 * @submitted:  out-param, set to whether a bio was actually submitted.
 * @do_balance: run f2fs_balance_fs() after the write.
 * @seq_id:     out-param for the fsync-node-entry sequence number.
 * Returns 0, or AOP_WRITEPAGE_ACTIVATE after redirtying the page.
 * The page is unlocked on every return path.
 */
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* at umount time just drop the dirty page */
		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
			ClearPageUptodate(page);
			dec_page_count(sbi, F2FS_DIRTY_NODES);
			unlock_page(page);
			return 0;
		}
		goto redirty_out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	/* defer async writeback of hot direct nodes unless CP is disabled */
	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	/* node_write excludes checkpoint; don't block in reclaim context */
	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;	/* bio already flushed */
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

1613
/*
 * Migrate a node page for garbage collection.  Foreground GC writes it
 * out synchronously right away; background GC merely redirties it so
 * regular writeback will move it.  Consumes the caller's lock and
 * reference on @node_page.  Returns 0 or -EAGAIN.
 */
int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		/* __write_node_page() unlocks the page on success */
		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

1651 1652 1653
/* address_space_operations ->writepage entry for the node mapping */
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

C
Chao Yu 已提交
1658
/*
 * Write out the dirty direct node pages of @inode for fsync.
 * In @atomic mode only the last dnode carries the fsync mark, so a crash
 * either sees the whole update or none of it; the scan is retried until
 * that last page has actually been written.  @seq_id receives the
 * sequence number used later to wait for the writeback to finish.
 * Returns 0 or -EIO.
 */
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			/* re-check under the page lock */
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			/* only the final dnode gets the fsync mark */
			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/*  may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	/* last page was written by someone else before we marked it: redo */
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO: 0;
}

1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816
/*
 * Callback for find_inode_nowait(): accept @inode only if it matches
 * @ino, is flagged dirty, and still sits on the dirty-meta list.
 * Takes an igrab() reference on a match; returns 1 on match, 0 otherwise.
 */
static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool on_dirty_list;

	if (inode->i_ino != ino)
		return 0;
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	on_dirty_list = !list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (!on_dirty_list)
		return 0;

	/* pin the inode for the caller; may fail during eviction */
	return igrab(inode) ? 1 : 0;
}

/*
 * If the inode whose node page this is remains dirty in memory, copy its
 * in-memory state into @page and unlock the page.  Returns true when the
 * update happened (page is then unlocked), false otherwise.
 */
static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t ino = ino_of_node(page);
	struct inode *inode = find_inode_nowait(sbi->sb, ino,
						f2fs_match_ino, NULL);

	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	/* drop the reference taken by f2fs_match_ino() */
	iput(inode);
	return true;
}

1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863
/*
 * Walk every dirty direct node page and flush the inline data of the
 * inodes that own inline nodes.  Always returns 0 in the current code.
 */
int f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	pagevec_init(&pvec);

	while ((nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (!IS_DNODE(page))
				continue;

			lock_page(page);

			/* page may have been truncated from the mapping */
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}

C
Chao Yu 已提交
1864 1865
/*
 * Write back dirty node pages in three passes: indirect nodes first,
 * then dentry (hot) dnodes, then file (cold) dnodes.  WB_SYNC_NONE
 * callers yield to concurrent WB_SYNC_ALL writers and may stop early.
 * Returns 0, the last page-write error, or -EIO on checkpoint error.
 */
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;
			bool may_dirty = true;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (do_balance && is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && may_dirty) {
				may_dirty = false;	/* try only once */
				if (flush_dirty_inode(page))
					goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;	/* quota exhausted: skip to final state */
			break;
		}
	}

	if (step < 2) {
		/* async writeback can stop after the hot-dnode pass */
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

1985 1986
/*
 * Wait for writeback of the fsync node pages queued up to and including
 * sequence number @seq_id, then fold in any mapping-wide write errors.
 * Returns 0 or a negative errno (-EIO on a page write error).
 */
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		/* entries are ordered; a newer one means we are done */
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		/* pin the page so it survives dropping the spinlock */
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}

J
Jaegeuk Kim 已提交
2028 2029 2030
/*
 * address_space_operations ->writepages for the node mapping.  Skips
 * work during recovery, when there are too few dirty nodes for an async
 * flush, or when a sync writer is already running; otherwise plugs the
 * block layer and flushes via f2fs_sync_node_pages().
 */
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	/* let async writeback yield to an in-flight sync writer */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

/*
 * address_space_operations ->set_page_dirty for node pages.  Keeps the
 * inode checksum fresh (debug builds) and accounts newly-dirtied pages.
 * Returns 1 if the page transitioned to dirty, 0 if already dirty.
 */
static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		f2fs_set_page_private(page, 0);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

J
Jaegeuk Kim 已提交
2090
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};

2104 2105
/* Look up nid @n in the free-nid radix tree; NULL if absent. */
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

C
Chao Yu 已提交
2110
/*
 * Insert @i into the free-nid radix tree and FREE_NID list, updating the
 * counter.  Returns 0 or the radix_tree_insert() error.
 */
static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

C
Chao Yu 已提交
2124
/*
 * Remove @i (currently in @state) from the free-nid structures: drop the
 * counter, unlink from the FREE_NID list if applicable, and delete from
 * the radix tree.  The caller still owns the free_nid object.
 */
static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	/* only FREE_NID entries sit on the list; PREALLOC_NID ones do not */
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
C
Chao Yu 已提交
2138 2139 2140
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

F
Fan Li 已提交
2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155
	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
J
Jaegeuk Kim 已提交
2156 2157
}

2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}

C
Chao Yu 已提交
2182
/* return if the nid is recognized as free */
2183 2184
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
J
Jaegeuk Kim 已提交
2185
{
2186
	struct f2fs_nm_info *nm_i = NM_I(sbi);
2187
	struct free_nid *i, *e;
2188
	struct nat_entry *ne;
2189 2190
	int err = -EINVAL;
	bool ret = false;
2191 2192

	/* 0 nid should not be used */
2193
	if (unlikely(nid == 0))
C
Chao Yu 已提交
2194
		return false;
2195

2196 2197 2198
	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

2199
	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
J
Jaegeuk Kim 已提交
2200
	i->nid = nid;
C
Chao Yu 已提交
2201
	i->state = FREE_NID;
J
Jaegeuk Kim 已提交
2202

2203
	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2204

C
Chao Yu 已提交
2205
	spin_lock(&nm_i->nid_list_lock);
2206 2207 2208 2209 2210 2211

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
C
Chao Yu 已提交
2212
		 *    - f2fs_alloc_nid
C
Chao Yu 已提交
2213
		 *     - __insert_nid_to_list(PREALLOC_NID)
2214
		 *                     - f2fs_balance_fs_bg
C
Chao Yu 已提交
2215 2216
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
2217 2218 2219 2220
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
C
Chao Yu 已提交
2221 2222 2223
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
2224
		 *      - set_node_addr
C
Chao Yu 已提交
2225
		 *  - f2fs_alloc_nid_done
C
Chao Yu 已提交
2226 2227
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *                         - __insert_nid_to_list(FREE_NID)
2228 2229 2230 2231 2232 2233 2234 2235
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
C
Chao Yu 已提交
2236
			if (e->state == FREE_NID)
2237 2238 2239 2240 2241
				ret = true;
			goto err_out;
		}
	}
	ret = true;
2242
	err = __insert_free_nid(sbi, i);
2243
err_out:
2244 2245 2246 2247 2248
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
2249 2250
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();
2251

2252
	if (err)
J
Jaegeuk Kim 已提交
2253
		kmem_cache_free(free_nid_slab, i);
2254
	return ret;
J
Jaegeuk Kim 已提交
2255 2256
}

C
Chao Yu 已提交
2257
static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
J
Jaegeuk Kim 已提交
2258
{
C
Chao Yu 已提交
2259
	struct f2fs_nm_info *nm_i = NM_I(sbi);
J
Jaegeuk Kim 已提交
2260
	struct free_nid *i;
2261 2262
	bool need_free = false;

C
Chao Yu 已提交
2263
	spin_lock(&nm_i->nid_list_lock);
2264
	i = __lookup_free_nid_list(nm_i, nid);
C
Chao Yu 已提交
2265
	if (i && i->state == FREE_NID) {
F
Fan Li 已提交
2266
		__remove_free_nid(sbi, i, FREE_NID);
2267
		need_free = true;
J
Jaegeuk Kim 已提交
2268
	}
C
Chao Yu 已提交
2269
	spin_unlock(&nm_i->nid_list_lock);
2270 2271 2272

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
J
Jaegeuk Kim 已提交
2273 2274
}

2275
static int scan_nat_page(struct f2fs_sb_info *sbi,
J
Jaegeuk Kim 已提交
2276 2277
			struct page *nat_page, nid_t start_nid)
{
2278
	struct f2fs_nm_info *nm_i = NM_I(sbi);
J
Jaegeuk Kim 已提交
2279 2280
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
C
Chao Yu 已提交
2281
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
J
Jaegeuk Kim 已提交
2282 2283
	int i;

J
Jaegeuk Kim 已提交
2284
	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
C
Chao Yu 已提交
2285

J
Jaegeuk Kim 已提交
2286 2287 2288
	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2289
		if (unlikely(start_nid >= nm_i->max_nid))
J
Jaegeuk Kim 已提交
2290
			break;
H
Haicheng Li 已提交
2291 2292

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2293 2294 2295 2296

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

2297 2298 2299 2300 2301 2302 2303
		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
C
Chao Yu 已提交
2304
	}
2305 2306

	return 0;
C
Chao Yu 已提交
2307 2308
}

2309
static void scan_curseg_cache(struct f2fs_sb_info *sbi)
C
Chao Yu 已提交
2310 2311 2312
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
2313 2314 2315 2316 2317 2318 2319 2320 2321 2322
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
2323
			add_free_nid(sbi, nid, true, false);
2324 2325 2326 2327 2328 2329 2330 2331 2332
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
C
Chao Yu 已提交
2333
	unsigned int i, idx;
2334
	nid_t nid;
C
Chao Yu 已提交
2335 2336 2337 2338 2339 2340

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
2341 2342
		if (!nm_i->free_nid_count[i])
			continue;
C
Chao Yu 已提交
2343
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2344 2345 2346 2347
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;
C
Chao Yu 已提交
2348 2349

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2350
			add_free_nid(sbi, nid, true, false);
C
Chao Yu 已提交
2351

C
Chao Yu 已提交
2352
			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
C
Chao Yu 已提交
2353 2354 2355 2356
				goto out;
		}
	}
out:
2357
	scan_curseg_cache(sbi);
C
Chao Yu 已提交
2358 2359

	up_read(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
2360 2361
}

2362
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
C
Chao Yu 已提交
2363
						bool sync, bool mount)
J
Jaegeuk Kim 已提交
2364 2365
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
2366
	int i = 0, ret;
2367
	nid_t nid = nm_i->next_scan_nid;
J
Jaegeuk Kim 已提交
2368

2369 2370 2371
	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

2372
	/* Enough entries */
C
Chao Yu 已提交
2373
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2374
		return 0;
J
Jaegeuk Kim 已提交
2375

C
Chao Yu 已提交
2376
	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2377
		return 0;
J
Jaegeuk Kim 已提交
2378

C
Chao Yu 已提交
2379 2380 2381 2382
	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

2383
		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2384
			return 0;
2385 2386
	}

2387
	/* readahead nat pages to be scanned */
C
Chao Yu 已提交
2388
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2389
							META_NAT, true);
J
Jaegeuk Kim 已提交
2390

2391
	down_read(&nm_i->nat_tree_lock);
2392

J
Jaegeuk Kim 已提交
2393
	while (1) {
2394 2395 2396
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);
J
Jaegeuk Kim 已提交
2397

2398 2399 2400 2401 2402 2403
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}
2404 2405 2406

			if (ret) {
				up_read(&nm_i->nat_tree_lock);
2407
				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2408
				return ret;
2409
			}
2410
		}
J
Jaegeuk Kim 已提交
2411 2412

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2413
		if (unlikely(nid >= nm_i->max_nid))
J
Jaegeuk Kim 已提交
2414
			nid = 0;
2415

2416
		if (++i >= FREE_NID_PAGES)
J
Jaegeuk Kim 已提交
2417 2418 2419
			break;
	}

2420 2421
	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;
J
Jaegeuk Kim 已提交
2422 2423

	/* find free nids from current sum_pages */
2424
	scan_curseg_cache(sbi);
2425

2426
	up_read(&nm_i->nat_tree_lock);
C
Chao Yu 已提交
2427

C
Chao Yu 已提交
2428
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
C
Chao Yu 已提交
2429
					nm_i->ra_nid_pages, META_NAT, false);
2430 2431

	return 0;
J
Jaegeuk Kim 已提交
2432 2433
}

2434
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2435
{
2436 2437
	int ret;

2438
	mutex_lock(&NM_I(sbi)->build_lock);
2439
	ret = __f2fs_build_free_nids(sbi, sync, mount);
2440
	mutex_unlock(&NM_I(sbi)->build_lock);
2441 2442

	return ret;
2443 2444
}

J
Jaegeuk Kim 已提交
2445 2446 2447 2448 2449
/*
 * If this function returns success, caller can obtain a new nid
 * from second parameter of this function.
 * The returned nid could be used ino as well as nid when inode is created.
 */
C
Chao Yu 已提交
2450
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
J
Jaegeuk Kim 已提交
2451 2452 2453 2454
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
2455
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2456
		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
J
Jaegeuk Kim 已提交
2457
		return false;
2458
	}
2459

C
Chao Yu 已提交
2460
	spin_lock(&nm_i->nid_list_lock);
J
Jaegeuk Kim 已提交
2461

2462 2463 2464 2465
	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}
J
Jaegeuk Kim 已提交
2466

C
Chao Yu 已提交
2467 2468
	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
C
Chao Yu 已提交
2469 2470
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
C
Chao Yu 已提交
2471
					struct free_nid, list);
2472
		*nid = i->nid;
C
Chao Yu 已提交
2473

F
Fan Li 已提交
2474
		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2475
		nm_i->available_nids--;
C
Chao Yu 已提交
2476

2477
		update_free_nid_bitmap(sbi, *nid, false, false);
C
Chao Yu 已提交
2478

C
Chao Yu 已提交
2479
		spin_unlock(&nm_i->nid_list_lock);
2480 2481
		return true;
	}
C
Chao Yu 已提交
2482
	spin_unlock(&nm_i->nid_list_lock);
2483 2484

	/* Let's scan nat pages and its caches to get free nids */
2485 2486 2487
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
J
Jaegeuk Kim 已提交
2488 2489
}

J
Jaegeuk Kim 已提交
2490
/*
C
Chao Yu 已提交
2491
 * f2fs_alloc_nid() should be called prior to this function.
J
Jaegeuk Kim 已提交
2492
 */
C
Chao Yu 已提交
2493
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
J
Jaegeuk Kim 已提交
2494 2495 2496 2497
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

C
Chao Yu 已提交
2498
	spin_lock(&nm_i->nid_list_lock);
2499
	i = __lookup_free_nid_list(nm_i, nid);
C
Chao Yu 已提交
2500
	f2fs_bug_on(sbi, !i);
F
Fan Li 已提交
2501
	__remove_free_nid(sbi, i, PREALLOC_NID);
C
Chao Yu 已提交
2502
	spin_unlock(&nm_i->nid_list_lock);
2503 2504

	kmem_cache_free(free_nid_slab, i);
J
Jaegeuk Kim 已提交
2505 2506
}

J
Jaegeuk Kim 已提交
2507
/*
C
Chao Yu 已提交
2508
 * f2fs_alloc_nid() should be called prior to this function.
J
Jaegeuk Kim 已提交
2509
 */
C
Chao Yu 已提交
2510
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
J
Jaegeuk Kim 已提交
2511
{
2512 2513
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
2514
	bool need_free = false;
2515

J
Jaegeuk Kim 已提交
2516 2517 2518
	if (!nid)
		return;

C
Chao Yu 已提交
2519
	spin_lock(&nm_i->nid_list_lock);
2520
	i = __lookup_free_nid_list(nm_i, nid);
C
Chao Yu 已提交
2521 2522
	f2fs_bug_on(sbi, !i);

C
Chao Yu 已提交
2523
	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
F
Fan Li 已提交
2524
		__remove_free_nid(sbi, i, PREALLOC_NID);
2525
		need_free = true;
2526
	} else {
F
Fan Li 已提交
2527
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2528
	}
2529 2530 2531

	nm_i->available_nids++;

2532
	update_free_nid_bitmap(sbi, nid, true, false);
C
Chao Yu 已提交
2533

C
Chao Yu 已提交
2534
	spin_unlock(&nm_i->nid_list_lock);
2535 2536 2537

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
J
Jaegeuk Kim 已提交
2538 2539
}

C
Chao Yu 已提交
2540
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
C
Chao Yu 已提交
2541 2542 2543 2544
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

C
Chao Yu 已提交
2545
	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2546 2547
		return 0;

C
Chao Yu 已提交
2548 2549 2550
	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

C
Chao Yu 已提交
2551 2552 2553
	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;
C
Chao Yu 已提交
2554

C
Chao Yu 已提交
2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565
		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
C
Chao Yu 已提交
2566
	}
C
Chao Yu 已提交
2567

C
Chao Yu 已提交
2568 2569 2570 2571 2572
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

C
Chao Yu 已提交
2573
void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2574 2575 2576 2577 2578 2579
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

C
Chao Yu 已提交
2580
	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2581
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
2582

2583
	ri = F2FS_INODE(page);
2584 2585 2586
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		set_inode_flag(inode, FI_INLINE_XATTR);
	} else {
2587
		clear_inode_flag(inode, FI_INLINE_XATTR);
2588 2589 2590
		goto update_inode;
	}

C
Chao Yu 已提交
2591 2592
	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
2593 2594
	inline_size = inline_xattr_size(inode);

2595
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2596
	memcpy(dst_addr, src_addr, inline_size);
2597
update_inode:
C
Chao Yu 已提交
2598
	f2fs_update_inode(inode, ipage);
2599 2600 2601
	f2fs_put_page(ipage, 1);
}

C
Chao Yu 已提交
2602
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2603
{
2604
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2605
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2606 2607
	nid_t new_xnid;
	struct dnode_of_data dn;
2608
	struct node_info ni;
2609
	struct page *xpage;
2610
	int err;
2611 2612 2613 2614

	if (!prev_xnid)
		goto recover_xnid;

2615
	/* 1: invalidate the previous xattr nid */
2616 2617 2618 2619
	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
	if (err)
		return err;

C
Chao Yu 已提交
2620
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2621
	dec_valid_node_count(sbi, inode, false);
2622
	set_node_addr(sbi, &ni, NULL_ADDR, false);
2623 2624

recover_xnid:
2625
	/* 2: update xattr nid in inode */
C
Chao Yu 已提交
2626
	if (!f2fs_alloc_nid(sbi, &new_xnid))
2627 2628 2629
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
C
Chao Yu 已提交
2630
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2631
	if (IS_ERR(xpage)) {
C
Chao Yu 已提交
2632
		f2fs_alloc_nid_failed(sbi, new_xnid);
2633 2634 2635
		return PTR_ERR(xpage);
	}

C
Chao Yu 已提交
2636 2637
	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);
2638 2639

	/* 3: update and set xattr node page dirty */
2640
	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2641

2642 2643
	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);
2644

2645
	return 0;
2646 2647
}

C
Chao Yu 已提交
2648
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
J
Jaegeuk Kim 已提交
2649
{
2650
	struct f2fs_inode *src, *dst;
J
Jaegeuk Kim 已提交
2651 2652 2653
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
2654
	int err;
J
Jaegeuk Kim 已提交
2655

2656 2657 2658
	err = f2fs_get_node_info(sbi, ino, &old_ni);
	if (err)
		return err;
2659 2660 2661

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
2662
retry:
2663
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2664
	if (!ipage) {
C
Chao Yu 已提交
2665
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2666 2667
		goto retry;
	}
J
Jaegeuk Kim 已提交
2668

A
arter97 已提交
2669
	/* Should not use this inode from free nid list */
C
Chao Yu 已提交
2670
	remove_free_nid(sbi, ino);
J
Jaegeuk Kim 已提交
2671

2672 2673
	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
J
Jaegeuk Kim 已提交
2674
	fill_node_footer(ipage, ino, ino, 0, true);
2675
	set_cold_node(ipage, false);
J
Jaegeuk Kim 已提交
2676

2677 2678
	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);
J
Jaegeuk Kim 已提交
2679

2680 2681 2682 2683 2684
	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
2685
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
C
Chao Yu 已提交
2686
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2687
		dst->i_extra_isize = src->i_extra_isize;
C
Chao Yu 已提交
2688

2689
		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
C
Chao Yu 已提交
2690 2691 2692 2693
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

2694
		if (f2fs_sb_has_project_quota(sbi) &&
C
Chao Yu 已提交
2695 2696 2697
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;
2698

2699
		if (f2fs_sb_has_inode_crtime(sbi) &&
2700 2701 2702 2703 2704
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
C
Chao Yu 已提交
2705
	}
J
Jaegeuk Kim 已提交
2706 2707 2708 2709

	new_ni = old_ni;
	new_ni.ino = ino;

C
Chao Yu 已提交
2710
	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2711
		WARN_ON(1);
2712
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
J
Jaegeuk Kim 已提交
2713
	inc_valid_inode_count(sbi);
2714
	set_page_dirty(ipage);
J
Jaegeuk Kim 已提交
2715 2716 2717 2718
	f2fs_put_page(ipage, 1);
	return 0;
}

2719
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
J
Jaegeuk Kim 已提交
2720 2721 2722 2723 2724
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
2725
	int i, idx, last_offset, nrpages;
J
Jaegeuk Kim 已提交
2726 2727 2728 2729 2730 2731

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

2732
	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2733
		nrpages = min(last_offset - i, BIO_MAX_PAGES);
2734

A
arter97 已提交
2735
		/* readahead node pages */
C
Chao Yu 已提交
2736
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
J
Jaegeuk Kim 已提交
2737

2738
		for (idx = addr; idx < addr + nrpages; idx++) {
C
Chao Yu 已提交
2739
			struct page *page = f2fs_get_tmp_page(sbi, idx);
2740

2741 2742 2743
			if (IS_ERR(page))
				return PTR_ERR(page);

2744 2745 2746 2747 2748 2749
			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
2750
		}
2751

2752
		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2753
							addr + nrpages);
J
Jaegeuk Kim 已提交
2754
	}
2755
	return 0;
J
Jaegeuk Kim 已提交
2756 2757
}

2758
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
J
Jaegeuk Kim 已提交
2759 2760 2761
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2762
	struct f2fs_journal *journal = curseg->journal;
J
Jaegeuk Kim 已提交
2763 2764
	int i;

2765
	down_write(&curseg->journal_rwsem);
2766
	for (i = 0; i < nats_in_cursum(journal); i++) {
J
Jaegeuk Kim 已提交
2767 2768
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
2769
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
J
Jaegeuk Kim 已提交
2770

2771
		raw_ne = nat_in_journal(journal, i);
2772

J
Jaegeuk Kim 已提交
2773 2774
		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
2775 2776
			ne = __alloc_nat_entry(nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
J
Jaegeuk Kim 已提交
2777
		}
2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790

		/*
		 * if a free nat in journal has not been used after last
		 * checkpoint, we should remove it from available nids,
		 * since later we will add it again.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

J
Jaegeuk Kim 已提交
2791 2792
		__set_nat_cache_dirty(nm_i, ne);
	}
2793
	update_nats_in_cursum(journal, -i);
2794
	up_write(&curseg->journal_rwsem);
J
Jaegeuk Kim 已提交
2795 2796
}

2797 2798
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head, int max)
J
Jaegeuk Kim 已提交
2799
{
2800
	struct nat_entry_set *cur;
J
Jaegeuk Kim 已提交
2801

2802 2803
	if (nes->entry_cnt >= max)
		goto add_out;
J
Jaegeuk Kim 已提交
2804

2805 2806 2807 2808 2809
	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
2810
	}
2811 2812 2813
add_out:
	list_add_tail(&nes->set_list, head);
}
J
Jaegeuk Kim 已提交
2814

J
Jaegeuk Kim 已提交
2815
static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2816 2817 2818 2819 2820 2821
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
F
Fan Li 已提交
2822
	int i = 0;
2823 2824 2825 2826

	if (!enabled_nat_bits(sbi, NULL))
		return;

F
Fan Li 已提交
2827 2828 2829 2830 2831
	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
C
Chao Yu 已提交
2832
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2833 2834 2835
			valid++;
	}
	if (valid == 0) {
J
Jaegeuk Kim 已提交
2836 2837
		__set_bit_le(nat_index, nm_i->empty_nat_bits);
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2838 2839 2840
		return;
	}

J
Jaegeuk Kim 已提交
2841
	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
2842
	if (valid == NAT_ENTRY_PER_BLOCK)
J
Jaegeuk Kim 已提交
2843
		__set_bit_le(nat_index, nm_i->full_nat_bits);
2844
	else
J
Jaegeuk Kim 已提交
2845
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2846 2847
}

2848
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2849
		struct nat_entry_set *set, struct cp_control *cpc)
2850 2851
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2852
	struct f2fs_journal *journal = curseg->journal;
2853 2854 2855 2856 2857
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;
J
Jaegeuk Kim 已提交
2858

2859 2860 2861 2862 2863
	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
2864 2865
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2866 2867 2868
		to_journal = false;

	if (to_journal) {
2869
		down_write(&curseg->journal_rwsem);
2870 2871
	} else {
		page = get_next_nat_page(sbi, start_nid);
2872 2873 2874
		if (IS_ERR(page))
			return PTR_ERR(page);

2875 2876 2877
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}
2878

2879 2880 2881 2882 2883 2884
	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

2885
		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2886 2887

		if (to_journal) {
C
Chao Yu 已提交
2888
			offset = f2fs_lookup_journal_in_cursum(journal,
2889 2890
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
2891 2892
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
2893
		} else {
2894
			raw_ne = &nat_blk->entries[nid - start_nid];
J
Jaegeuk Kim 已提交
2895
		}
2896 2897
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
2898
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
2899
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
2900
			add_free_nid(sbi, nid, false, true);
C
Chao Yu 已提交
2901 2902
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
2903
			update_free_nid_bitmap(sbi, nid, false, false);
2904 2905
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
2906
	}
J
Jaegeuk Kim 已提交
2907

2908
	if (to_journal) {
2909
		up_write(&curseg->journal_rwsem);
2910 2911
	} else {
		__update_nat_bits(sbi, start_nid, page);
2912
		f2fs_put_page(page, 1);
2913
	}
2914

2915 2916 2917 2918 2919
	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
2920
	return 0;
2921
}
2922

2923 2924 2925
/*
 * This function is called during the checkpointing process.
 */
2926
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2927 2928 2929
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2930
	struct f2fs_journal *journal = curseg->journal;
2931
	struct nat_entry_set *setvec[SETVEC_SIZE];
2932 2933 2934 2935
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
2936
	int err = 0;
2937

2938 2939 2940 2941 2942 2943 2944
	/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
	if (enabled_nat_bits(sbi, cpc)) {
		down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		up_write(&nm_i->nat_tree_lock);
	}

2945
	if (!nm_i->dirty_nat_cnt)
2946
		return 0;
2947

2948
	down_write(&nm_i->nat_tree_lock);
2949

2950 2951 2952 2953 2954
	/*
	 * if there are no enough space in journal to store dirty nat
	 * entries, remove all entries from journal and merge them
	 * into nat entry set.
	 */
2955
	if (enabled_nat_bits(sbi, cpc) ||
2956
		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2957 2958 2959
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
2960
					set_idx, SETVEC_SIZE, setvec))) {
2961 2962 2963 2964
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
2965
						MAX_NAT_JENTRIES(journal));
J
Jaegeuk Kim 已提交
2966
	}
2967

2968
	/* flush dirty nats in nat entry set */
2969 2970 2971 2972 2973
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}
2974

2975
	up_write(&nm_i->nat_tree_lock);
2976
	/* Allow dirty nats by node block allocation in write_begin */
2977 2978

	return err;
J
Jaegeuk Kim 已提交
2979 2980
}

2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	if (!enabled_nat_bits(sbi, NULL))
		return 0;

C
Chao Yu 已提交
2993
	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
2994
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
C
Chao Yu 已提交
2995
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
2996 2997 2998 2999 3000 3001
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3002 3003 3004
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
C
Chao Yu 已提交
3005
		if (IS_ERR(page))
3006
			return PTR_ERR(page);
3007 3008 3009 3010 3011 3012

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

3013
	cp_ver |= (cur_cp_crc(ckpt) << 32);
3014 3015 3016 3017 3018 3019 3020 3021
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		disable_nat_bits(sbi, true);
		return 0;
	}

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

3022
	f2fs_notice(sbi, "Found nat_bits in checkpoint");
3023 3024 3025
	return 0;
}

3026
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
3043
		last_nid = nid + NAT_ENTRY_PER_BLOCK;
3044

3045
		spin_lock(&NM_I(sbi)->nid_list_lock);
3046
		for (; nid < last_nid; nid++)
3047 3048
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

J
Jaegeuk Kim 已提交
3060 3061 3062 3063 3064
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
3065 3066
	unsigned int nat_segs;
	int err;
J
Jaegeuk Kim 已提交
3067 3068 3069 3070 3071

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3072 3073
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3074

3075
	/* not used nids: 0, node, meta, (and root counted as valid node) */
3076
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3077
						F2FS_RESERVED_NODE_NUM;
C
Chao Yu 已提交
3078 3079
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
J
Jaegeuk Kim 已提交
3080
	nm_i->nat_cnt = 0;
3081
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
C
Chao Yu 已提交
3082
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
C
Chao Yu 已提交
3083
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
J
Jaegeuk Kim 已提交
3084

3085
	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
C
Chao Yu 已提交
3086
	INIT_LIST_HEAD(&nm_i->free_nid_list);
3087 3088
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
J
Jaegeuk Kim 已提交
3089
	INIT_LIST_HEAD(&nm_i->nat_entries);
3090
	spin_lock_init(&nm_i->nat_list_lock);
J
Jaegeuk Kim 已提交
3091 3092

	mutex_init(&nm_i->build_lock);
C
Chao Yu 已提交
3093
	spin_lock_init(&nm_i->nid_list_lock);
3094
	init_rwsem(&nm_i->nat_tree_lock);
J
Jaegeuk Kim 已提交
3095 3096

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
A
Alexandru Gheorghiu 已提交
3097
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
J
Jaegeuk Kim 已提交
3098 3099 3100 3101
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

A
Alexandru Gheorghiu 已提交
3102 3103 3104 3105
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
3106

3107 3108 3109 3110
	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

3111 3112 3113 3114 3115 3116 3117
#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

J
Jaegeuk Kim 已提交
3118 3119 3120
	return 0;
}

J
Jaegeuk Kim 已提交
3121
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
C
Chao Yu 已提交
3122 3123
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
3124
	int i;
C
Chao Yu 已提交
3125

3126
	nm_i->free_nid_bitmap =
3127 3128 3129
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
C
Chao Yu 已提交
3130 3131 3132
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

3133 3134
	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3135
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3136
		if (!nm_i->free_nid_bitmap[i])
3137 3138 3139
			return -ENOMEM;
	}

C
Chao Yu 已提交
3140
	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
C
Chao Yu 已提交
3141 3142 3143
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;
3144

3145 3146 3147 3148
	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
3149 3150
	if (!nm_i->free_nid_count)
		return -ENOMEM;
C
Chao Yu 已提交
3151 3152 3153
	return 0;
}

C
Chao Yu 已提交
3154
int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
J
Jaegeuk Kim 已提交
3155 3156 3157
{
	int err;

C
Chao Yu 已提交
3158 3159
	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
J
Jaegeuk Kim 已提交
3160 3161 3162 3163 3164 3165 3166
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

C
Chao Yu 已提交
3167 3168 3169 3170
	err = init_free_nid_cache(sbi);
	if (err)
		return err;

3171 3172 3173
	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

3174
	return f2fs_build_free_nids(sbi, true, true);
J
Jaegeuk Kim 已提交
3175 3176
}

C
Chao Yu 已提交
3177
/*
 * Tear down the node manager: drain the free nid list, the NAT entry
 * cache and the NAT set cache, free all bitmaps, and release nm_info.
 * Counterpart of f2fs_build_node_manager().
 */
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		/*
		 * NOTE(review): the lock is dropped around each slab free,
		 * presumably to keep hold time short; the _safe iterator has
		 * already cached next_i, so freeing i here is fine.
		 */
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	/* by now both nid counts and the list itself must be empty */
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		/* resume the gang lookup just past the last nid returned */
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			/* unlink from the LRU list under nat_list_lock first */
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero, when cp_error was occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	/* free bitmaps from init_free_nid_cache()/init_node_manager() */
	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;	/* deliberately shadows the free_nid iterator above */

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	/* clear the back-pointer before freeing the descriptor itself */
	sbi->nm_info = NULL;
	kvfree(nm_i);
}

C
Chao Yu 已提交
3254
/*
 * Create the four slab caches used by the node manager.  On failure,
 * every cache created so far is destroyed and -ENOMEM is returned.
 */
int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto out;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto free_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto free_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto free_nat_entry_set;
	return 0;

	/* unwind in reverse order of creation */
free_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
free_free_nid:
	kmem_cache_destroy(free_nid_slab);
free_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
out:
	return -ENOMEM;
}

C
Chao Yu 已提交
3287
/*
 * Release the slab caches created by f2fs_create_node_manager_caches(),
 * in reverse order of creation.
 */
void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}