/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

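/*
 * Fast-path lookup: hit only when @ofs falls inside the cached entry,
 * avoiding a tree walk for repeated accesses to the same extent.
 */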
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

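/* Slow path: binary-search the rb-tree for the entry covering @ofs. */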
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

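/*
 * Return the rb entry covering @ofs, trying @cached_re first and
 * falling back to a full tree walk on a miss.
 */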
struct rb_entry *__lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}

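/*
 * Find the link (and its parent) where a new entry at @ofs should be
 * inserted. Hitting an existing entry is a bug: callers must have
 * removed or split any extent overlapping @ofs beforehand.
 */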
struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root *root, struct rb_node **parent,
				unsigned int ofs)
{
	struct rb_node **p = &root->rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			p = &(*p)->rb_left;
		else if (ofs >= re->ofs + re->len)
			p = &(*p)->rb_right;
		else
			f2fs_bug_on(sbi, 1);
	}

	return p;
}

/*
 * Look up the rb entry covering @ofs in the rb-tree: return it on a
 * hit, NULL otherwise. To simplify the insertion that may follow, also
 * report:
 * @prev_entry: extent before @ofs
 * @next_entry: extent after @ofs
 * @insert_p: insert point for a new extent at @ofs
 * @insert_parent: parent node of the insert point
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force)
{
	struct rb_node **pnode = &root->rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			pnode = &(*pnode)->rb_left;
		else if (ofs >= re->ofs + re->len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}

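/*
 * Debug check: walk the tree in order and verify that no neighboring
 * extents overlap. Only active with CONFIG_F2FS_CHECK_FS; otherwise it
 * reports the tree as consistent unconditionally.
 */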
bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}

		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

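/*
 * Allocate an extent_node for @ei and link it into @et at the given
 * rb-tree position. Returns NULL if the atomic allocation fails.
 */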
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

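/*
 * Unlink @en from @et, drop the node counters, invalidate the lookup
 * cache if it points at @en, and free the node.
 */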
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}

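/*
 * Find the inode's extent tree in the per-sb radix tree, allocating and
 * initializing it on first use; a tree found on the zombie list is
 * revived instead. The result is cached in the f2fs inode.
 */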
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

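/*
 * Attach the first node to an empty tree and prime both the largest
 * extent and the lookup cache with it.
 */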
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

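/* Release every node in @et; returns the number of nodes freed. */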
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

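/*
 * Invalidate the cached largest extent if it overlaps [fofs, fofs + len)
 * and mark the inode dirty so the on-disk copy is updated as well.
 */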
static void __drop_largest_extent(struct inode *inode,
					pgoff_t fofs, unsigned int len)
{
	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;

	if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
		largest->len = 0;
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

/* return true if the inode page is changed */
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			i_ext->len = 0;
			return true;
		}
		return false;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return false;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	return false;
}

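/*
 * Look up @pgofs in the extent cache: try the cached largest extent
 * first, then the rb-tree. On a hit, copy the extent into @ei, refresh
 * the node's position in the global LRU list, and account the hit.
 */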
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)__lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

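/*
 * Try to merge @ei into @prev_ex (back merge) and/or @next_ex (front
 * merge). If both neighbors become contiguous, @prev_ex is folded into
 * @next_ex. Returns the surviving node, or NULL if nothing merged.
 */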
static struct extent_node *__try_merge_extent_node(struct inode *inode,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(inode, et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}

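/*
 * Insert @ei as a new node, reusing the insertion point precomputed by
 * __lookup_rb_tree_ret when available to skip a second tree walk.
 */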
static struct extent_node *__insert_extent_tree(struct inode *inode,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;

	__try_update_largest_extent(inode, et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}

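/*
 * Update the cache for [fofs, fofs + len): invalidate or split extents
 * overlapping the range, then, if @blkaddr is valid, install the new
 * mapping, merging with surviving neighbors where possible. Inodes
 * flagged FI_NO_EXTENT are skipped.
 */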
static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop the largest extent before lookup, in case it has already
	 * been shrunk from the extent tree
	 */
	__drop_largest_extent(inode, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(inode, et, &ei,
							NULL, NULL);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(inode, et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if the original extent is split into zero or two parts, the
		 * tree has been altered by a deletion or insertion, so
		 * invalidate the cached pointers into the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
			__insert_extent_tree(inode, et, &ei,
						insert_p, insert_parent);

		/* give up the extent cache if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			__drop_largest_extent(inode, 0, UINT_MAX);
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	write_unlock(&et->lock);
}

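/*
 * Shrinker entry point: first reap whole zombie trees (abandoned at
 * inode eviction), then trim the coldest nodes off the global LRU list
 * until about @nr_shrink entries have been reclaimed.
 */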
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

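/* Free all extent nodes of the inode's tree; returns the count freed. */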
unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

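/*
 * Disable extent caching for the inode and drop everything cached so
 * far, including the largest extent.
 */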
void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	__drop_largest_extent(inode, 0, UINT_MAX);
	write_unlock(&et->lock);
}

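/*
 * Called at inode eviction. A linked, non-bad inode that still has
 * cached nodes parks its tree on the zombie list for lazy reclaim by
 * the shrinker; otherwise the tree is freed and removed from the radix
 * tree immediately.
 */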
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

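/*
 * Update the cache for the single block addressed by @dn. NEW_ADDR
 * (preallocated but unwritten) is passed down as NULL_ADDR, so the
 * block is invalidated in the cache rather than mapped.
 */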
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

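/* Initialize per-superblock extent cache state at mount time. */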
void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}