/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

/*
 * Fast-path lookup: return @cached_re if it already covers @ofs,
 * i.e. ofs lies within [cached_re->ofs, cached_re->ofs + cached_re->len).
 * Returns NULL when there is no cached entry or it does not cover @ofs.
 */
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (!cached_re)
		return NULL;

	if (ofs < cached_re->ofs || ofs >= cached_re->ofs + cached_re->len)
		return NULL;

	return cached_re;
}

/*
 * Slow-path lookup: binary-search the rb-tree for the entry whose
 * [ofs, ofs + len) range contains @ofs.  Returns NULL on miss.
 */
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
							unsigned int ofs)
{
	struct rb_node *node;

	for (node = root->rb_node; node; ) {
		struct rb_entry *re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

/*
 * Look up the rb_entry covering @ofs: try the cached entry first,
 * fall back to a full tree search on a cache miss.
 */
struct rb_entry *__lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}

64
struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94
				struct rb_root *root, struct rb_node **parent,
				unsigned int ofs)
{
	struct rb_node **p = &root->rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			p = &(*p)->rb_left;
		else if (ofs >= re->ofs + re->len)
			p = &(*p)->rb_right;
		else
			f2fs_bug_on(sbi, 1);
	}

	return p;
}

/*
 * lookup rb entry in position of @ofs in rb-tree,
 * if hit, return the entry, otherwise, return NULL
 * @prev_ex: extent before ofs
 * @next_ex: extent after ofs
 * @insert_p: insert point for new extent at ofs
 * in order to simplify the insertion after.
 * tree must stay unchanged between lookup and insertion.
 */
95
struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
96 97 98 99 100
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
101 102
				struct rb_node **insert_parent,
				bool force)
103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148
{
	struct rb_node **pnode = &root->rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			pnode = &(*pnode)->rb_left;
		else if (ofs >= re->ofs + re->len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
149
	if (ofs == re->ofs || force) {
150 151 152 153
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
154
	if (ofs == re->ofs + re->len - 1 || force) {
155 156 157 158 159 160 161
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}

162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193
bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}

		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
209
	en->et = et;
210 211 212

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
213
	atomic_inc(&et->node_cnt);
214 215 216 217 218 219 220 221
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
222
	atomic_dec(&et->node_cnt);
223 224 225 226
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
227 228 229 230 231 232 233 234 235 236 237 238 239
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

253
	mutex_lock(&sbi->extent_tree_lock);
254 255 256 257 258 259 260 261 262
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
263
		INIT_LIST_HEAD(&et->list);
264
		atomic_set(&et->node_cnt, 0);
265
		atomic_inc(&sbi->total_ext_tree);
266 267
	} else {
		atomic_dec(&sbi->total_zombie_tree);
268
		list_del_init(&et->list);
269
	}
270
	mutex_unlock(&sbi->extent_tree_lock);
271 272 273 274 275 276 277

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

278 279
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
280 281 282 283
{
	struct rb_node **p = &et->root.rb_node;
	struct extent_node *en;

284
	en = __attach_extent_node(sbi, et, ei, NULL, p);
285 286
	if (!en)
		return NULL;
287 288

	et->largest = en->ei;
289 290 291 292 293
	et->cached_en = en;
	return en;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
294
					struct extent_tree *et)
295 296 297
{
	struct rb_node *node, *next;
	struct extent_node *en;
298
	unsigned int count = atomic_read(&et->node_cnt);
299 300 301 302 303

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
304
		__release_extent_node(sbi, et, en);
305 306 307
		node = next;
	}

308
	return count - atomic_read(&et->node_cnt);
309 310
}

F
Fan Li 已提交
311 312
static void __drop_largest_extent(struct inode *inode,
					pgoff_t fofs, unsigned int len)
313 314 315
{
	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;

316
	if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
317
		largest->len = 0;
318
		f2fs_mark_inode_dirty_sync(inode, true);
319
	}
320 321
}

322
/* return true, if inode page is changed */
323
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
324 325 326 327 328 329
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

330 331 332 333 334 335 336 337
	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			i_ext->len = 0;
			return true;
		}
		return false;
	}
338 339 340

	et = __grab_extent_tree(inode);

341 342
	if (!i_ext || !i_ext->len)
		return false;
343

C
Chao Yu 已提交
344
	get_extent_info(&ei, i_ext);
345 346

	write_lock(&et->lock);
347
	if (atomic_read(&et->node_cnt))
348 349
		goto out;

350
	en = __init_extent_tree(sbi, et, &ei);
351 352 353 354 355 356 357
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
358
	return false;
359 360
}

361 362 363 364 365 366 367 368 369 370
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	bool ret =  __f2fs_init_extent_tree(inode, i_ext);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);

	return ret;
}

371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
389
		stat_inc_largest_node_hit(sbi);
390 391 392
		goto out;
	}

393 394 395 396 397 398 399 400 401 402 403 404 405 406 407
	en = (struct extent_node *)__lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
408
	}
409 410
	spin_unlock(&sbi->extent_lock);
	ret = true;
411
out:
412
	stat_inc_total_hit(sbi);
413 414 415 416 417 418
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

419
static struct extent_node *__try_merge_extent_node(struct inode *inode,
420 421
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
422
				struct extent_node *next_ex)
423
{
424
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
425 426 427 428 429 430 431
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}
432

433 434 435 436
	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
437 438 439
		if (en)
			__release_extent_node(sbi, et, prev_ex);

440 441
		en = next_ex;
	}
442

443 444 445
	if (!en)
		return NULL;

446
	__try_update_largest_extent(inode, et, en);
447 448

	spin_lock(&sbi->extent_lock);
449
	if (!list_empty(&en->list)) {
450
		list_move_tail(&en->list, &sbi->extent_list);
451 452
		et->cached_en = en;
	}
453
	spin_unlock(&sbi->extent_lock);
454 455 456
	return en;
}

457
static struct extent_node *__insert_extent_tree(struct inode *inode,
458 459 460 461
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
462
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
463
	struct rb_node **p;
464 465
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;
466 467 468 469 470 471 472

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

473
	p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
474 475 476 477
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;
478

479
	__try_update_largest_extent(inode, et, en);
480 481 482 483

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
484
	et->cached_en = en;
485
	spin_unlock(&sbi->extent_lock);
486 487 488
	return en;
}

C
Chao Yu 已提交
489
static void f2fs_update_extent_tree_range(struct inode *inode,
C
Chao Yu 已提交
490
				pgoff_t fofs, block_t blkaddr, unsigned int len)
491 492 493
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
494
	struct extent_node *en = NULL, *en1 = NULL;
C
Chao Yu 已提交
495
	struct extent_node *prev_en = NULL, *next_en = NULL;
496
	struct extent_info ei, dei, prev;
497
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
C
Chao Yu 已提交
498 499
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
500 501

	if (!et)
C
Chao Yu 已提交
502
		return;
503

504 505
	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

506 507
	write_lock(&et->lock);

508
	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
509
		write_unlock(&et->lock);
C
Chao Yu 已提交
510
		return;
511 512 513 514 515
	}

	prev = et->largest;
	dei.len = 0;

516 517 518 519
	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
F
Fan Li 已提交
520
	__drop_largest_extent(inode, fofs, len);
521

C
Chao Yu 已提交
522
	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
523 524 525 526
	en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
527
					&insert_p, &insert_parent, false);
528 529
	if (!en)
		en = next_en;
C
Chao Yu 已提交
530 531

	/* 2. invlidate all extent nodes in range [fofs, fofs + len - 1] */
532 533 534
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */
C
Chao Yu 已提交
535

536
		next_en = en1 = NULL;
C
Chao Yu 已提交
537 538

		dei = en->ei;
539 540
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);
C
Chao Yu 已提交
541

542 543 544 545 546
		if (pos > dei.fofs &&	pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}
C
Chao Yu 已提交
547

548 549 550 551 552
		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
553
				en1 = __insert_extent_tree(inode, et, &ei,
554 555 556 557 558 559 560
							NULL, NULL);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
C
Chao Yu 已提交
561
			}
562
			parts++;
C
Chao Yu 已提交
563 564
		}

565 566
		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);
C
Chao Yu 已提交
567

G
Geliang Tang 已提交
568 569
			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
570 571
		}

572
		if (parts)
573
			__try_update_largest_extent(inode, et, en);
574
		else
575
			__release_extent_node(sbi, et, en);
C
Chao Yu 已提交
576 577

		/*
578 579 580
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers regard to tree.
C
Chao Yu 已提交
581
		 */
582 583 584
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
585
		}
586
		en = next_en;
587 588 589 590
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
C
Chao Yu 已提交
591 592

		set_extent_info(&ei, fofs, blkaddr, len);
593 594
		if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
			__insert_extent_tree(inode, et, &ei,
595
						insert_p, insert_parent);
596 597 598 599 600

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
601
			__drop_largest_extent(inode, 0, UINT_MAX);
602
			set_inode_flag(inode, FI_NO_EXTENT);
603
		}
C
Chao Yu 已提交
604
	}
605

606
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
607
		__free_extent_tree(sbi, et);
608 609 610 611 612 613

	write_unlock(&et->lock);
}

unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
614
	struct extent_tree *et, *next;
615
	struct extent_node *en;
616 617 618 619 620 621
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

622 623 624
	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

625
	if (!mutex_trylock(&sbi->extent_tree_lock))
626 627 628
		goto out;

	/* 1. remove unreferenced extent tree */
629
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
630 631
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
632
			node_cnt += __free_extent_tree(sbi, et);
633 634
			write_unlock(&et->lock);
		}
635
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
636 637 638 639 640 641
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;
642

643 644
		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
645
		cond_resched();
646
	}
647
	mutex_unlock(&sbi->extent_tree_lock);
648

649
free_node:
650
	/* 2. remove LRU extent entries */
651
	if (!mutex_trylock(&sbi->extent_tree_lock))
652 653 654 655 656
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
657 658
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
659
			break;
660 661 662 663 664 665 666 667
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}
668

669 670
		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);
671

672
		__detach_extent_node(sbi, et, en);
673

674 675 676
		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
677
	}
678 679
	spin_unlock(&sbi->extent_lock);

680
unlock_out:
681
	mutex_unlock(&sbi->extent_tree_lock);
682 683 684 685 686 687 688 689 690 691 692 693
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

/*
 * Free all extent nodes of @inode's extent tree (the tree itself is kept).
 * Returns the number of nodes freed, 0 if there is no tree or it is empty.
 */
unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

704 705 706 707 708
void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

709 710 711
	if (!f2fs_may_extent_tree(inode))
		return;

712 713 714 715 716 717 718 719
	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	__drop_largest_extent(inode, 0, UINT_MAX);
	write_unlock(&et->lock);
}

720 721 722 723 724 725 726 727 728
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

729 730
	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
731
		mutex_lock(&sbi->extent_tree_lock);
732
		list_add_tail(&et->list, &sbi->zombie_list);
733
		atomic_inc(&sbi->total_zombie_tree);
734
		mutex_unlock(&sbi->extent_tree_lock);
735 736 737 738 739 740 741
		return;
	}

	/* free all extent info belong to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
742
	mutex_lock(&sbi->extent_tree_lock);
743
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
744 745
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
746
	atomic_dec(&sbi->total_ext_tree);
747
	mutex_unlock(&sbi->extent_tree_lock);
748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

/*
 * Public lookup wrapper: consult the extent cache for @pgofs only when
 * the inode is allowed to use it.  Fills *ei and returns true on a hit.
 */
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	return f2fs_may_extent_tree(inode) ?
			f2fs_lookup_extent_tree(inode, pgofs, ei) : false;
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
766
	block_t blkaddr;
767 768 769 770

	if (!f2fs_may_extent_tree(dn->inode))
		return;

771 772 773 774
	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;
C
Chao Yu 已提交
775

776 777
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
778
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
C
Chao Yu 已提交
779 780 781 782 783 784 785 786 787
}

/*
 * Update the extent cache for a contiguous range of @len blocks mapped
 * at @blkaddr, starting at file offset @fofs.
 */
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

/*
 * Initialize all per-superblock extent cache state: the ino-indexed
 * radix tree, the global LRU list, the zombie list, their locks, and
 * the object counters.
 */
void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

/*
 * Create the slab caches for extent trees and extent nodes.
 * Returns 0 on success, -ENOMEM on failure (with partial state undone).
 */
int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		goto fail;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab)
		goto free_tree_slab;
	return 0;

free_tree_slab:
	kmem_cache_destroy(extent_tree_slab);
fail:
	return -ENOMEM;
}

/* Destroy the extent cache slabs; nodes first, then trees. */
void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}