// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

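/*
 * Fast-path lookup: return @cached_re if it already covers @ofs,
 * otherwise return NULL so the caller falls back to a tree walk.
 */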
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

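/* walk the rb-tree and return the entry whose range covers @ofs, if any */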
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}

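/*
 * Find the link under which a new entry starting at @ofs should be
 * inserted and record the future parent node in @parent; finding an
 * existing entry that already covers @ofs is treated as a bug.
 */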
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root *root, struct rb_node **parent,
				unsigned int ofs)
{
	struct rb_node **p = &root->rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			p = &(*p)->rb_left;
		else if (ofs >= re->ofs + re->len)
			p = &(*p)->rb_right;
		else
			f2fs_bug_on(sbi, 1);
	}

	return p;
}

/*
 * Look up the rb entry covering @ofs in the rb-tree.
 * If hit, return the entry; otherwise return NULL.
 * @prev_entry: entry before ofs
 * @next_entry: entry after ofs
 * @insert_p/@insert_parent: insert point for a new entry at ofs,
 * recorded to simplify the later insertion.
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force)
{
	struct rb_node **pnode = &root->rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			pnode = &(*pnode)->rb_left;
		else if (ofs >= re->ofs + re->len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}

bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}

		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

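/* allocate an extent node for @ei and link it into @et at @parent/@p */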
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

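/*
 * Erase @en from the rb-tree, drop the node counters, invalidate the
 * cached entry if it points at @en, then free the node.
 */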
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}

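/* look up the inode's extent tree, allocating it on first use */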
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

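/*
 * Seed an empty tree with a single node built from @ei; the new node
 * becomes both the largest and the cached extent.
 */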
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

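/* release every node in @et and return the number of nodes freed */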
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

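/* invalidate the cached largest extent if it overlaps [fofs, fofs + len) */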
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}

/* return true if inode page is changed */
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			i_ext->len = 0;
			return true;
		}
		return false;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return false;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	return false;
}

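/*
 * Build the in-memory extent tree from the on-disk extent at inode
 * initialization; if no tree gets attached, disable the extent cache
 * for this inode via FI_NO_EXTENT.
 */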
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	bool ret = __f2fs_init_extent_tree(inode, i_ext);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);

	return ret;
}

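/*
 * Look up @pgofs in the inode's extent cache: check the largest extent
 * first, then the cached node and the rb-tree; update hit statistics
 * and move a hit node to the tail of the global LRU list.
 */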
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

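/*
 * Try to merge @ei with its back (@prev_ex) and/or front (@next_ex)
 * neighbour; return the merged node, or NULL if no merge was possible.
 */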
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}

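/*
 * Insert @ei as a new extent node, reusing a precomputed insert
 * position from an earlier lookup when one is supplied.
 */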
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}

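/*
 * Update the extent cache for [fofs, fofs + len): drop or split any
 * nodes overlapping the range, then merge or insert the new mapping
 * when @blkaddr is valid.
 */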
static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers regarding the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
						insert_p, insert_parent);

		/* give up extent_cache if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

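/*
 * Shrink the extent cache by up to @nr_shrink objects: free
 * unreferenced (zombie) trees first, then reclaim extent nodes from
 * the head of the global LRU list.
 */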
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

698
	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	bool updated = false;

	if (!f2fs_may_extent_tree(inode))
		return;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (et->largest.len) {
		et->largest.len = 0;
		updated = true;
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

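/*
 * Tear down the inode's extent tree; trees that still hold nodes for a
 * live inode are parked on the zombie list and left to the shrinker.
 */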
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

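/*
 * Update the extent cache for the single block addressed by @dn;
 * NEW_ADDR is recorded as NULL_ADDR so the old mapping is only dropped.
 */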
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}