/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

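/*
 * Background GC kernel thread. It sleeps for an adaptive interval and, when
 * the filesystem looks idle, takes gc_mutex (trylock) and runs one round of
 * f2fs_gc(). The sleep time shrinks when there is plenty of garbage to
 * collect and grows when there is not.
 */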
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We must avoid triggering GC too frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions. So we wait a while to let more
		 * dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing prefree segments */
		if (excess_prefree_segs(sbi))
			f2fs_sync_fs(sbi->sb, true);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}

out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

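/*
 * Pick the victim-selection mode for this pass. gc_idle is a user tunable:
 * 0 keeps the default (cost-benefit for background GC, greedy otherwise),
 * 1 forces cost-benefit, and 2 forces greedy.
 */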
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

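/*
 * Fill in the victim selection policy. SSR greedily scans single segments
 * of the requested type, while regular (LFS) GC scans whole sections of any
 * dirty segment using the mode chosen by select_gc_type().
 */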
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > MAX_VICTIM_SEARCH)
		p->max_search = MAX_VICTIM_SEARCH;

	p->offset = sbi->last_victim[p->gc_mode];
}

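/*
 * Upper bound of the victim cost: a candidate whose cost reaches this bound
 * cannot be a useful victim. Greedy mode counts valid blocks, so the bound
 * is a fully valid section; cost-benefit values are inverted, so the bound
 * there is UINT_MAX.
 */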
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim sections that
	 * background GC marked before, since those sections are guaranteed
	 * to contain only a few valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time is changed by user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

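	/*
	 * Cost-benefit value from the classic LFS cleaning policy:
	 * benefit/cost = age * (1 - u) / (1 + u), scaled to percent here.
	 * The result is inverted (subtracted from UINT_MAX) because the
	 * caller picks the victim with the minimum cost.
	 */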
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

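	/*
	 * Circular scan over the dirty segmap starting at the last victim
	 * position: compute each candidate's cost, remember the minimum, and
	 * give up after p.max_search probes to bound the scan time.
	 */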
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

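/*
 * During data GC, inodes that own victim blocks are kept on a private list
 * (ilist) so that each inode is looked up and referenced only once;
 * put_gc_inode() drops the references after the pass completes.
 */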
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}

	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary entry
 * with the one in the NAT. If they match, the node is still valid and is
 * migrated with cold status; otherwise (a stale node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

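	/*
	 * Two passes over the summary entries: while initial is true we only
	 * issue readahead for the node pages, then we walk the entries again
	 * to actually dirty and write the valid ones.
	 */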
next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE, true);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}
	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index for the data addressed by a given node
 * offset. Callers must pass only offsets of direct node blocks; passing an
 * offset that points to any other node type, such as an indirect or double
 * indirect node block, is a caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

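	/*
	 * Node offsets in an inode's block-mapping tree, as the arithmetic
	 * below assumes: ofs 0 is the inode itself, ofs 1 and 2 are the two
	 * direct nodes, ofs 3 starts the first indirect subtree (one indirect
	 * node plus its NIDS_PER_BLOCK direct children), and so on. 'dec'
	 * counts the indirect/double-indirect nodes seen so far, which map
	 * no data themselves and so are excluded from the block index.
	 */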
	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

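/*
 * Returns 1 if the summary entry still refers to a live block: the node
 * must exist, its version must match the NAT, and it must still map the
 * given blkaddr at the recorded offset. On success *dni and *nofs are
 * filled in for the caller.
 */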
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

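/*
 * Move one data page. Background GC just marks the page cold and dirty so
 * that regular writeback migrates it later; foreground GC waits for any
 * in-flight writeback and writes the page out immediately via
 * do_write_data_page().
 */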
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		f2fs_wait_on_page_writeback(page, DATA, true);

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks whether the data block is still valid. If it is, the block is
 * copied with cold status and the parent node is updated. If the parent
 * node is not valid or the recorded block address differs, the victim
 * data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
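	/*
	 * The segment is scanned in four phases (a summary of the loop
	 * below): phase 0 reads ahead the owning node pages, phase 1
	 * validates each dnode and reads ahead the inodes, phase 2 grabs
	 * the inodes and reads ahead the data pages, and phase 3 actually
	 * moves the data pages.
	 */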

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				start_bidx = start_bidx_of_node(nofs,
								F2FS_I(inode));
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

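/*
 * Collect one victim segment: read its summary block to learn whether it
 * holds node or data blocks, then migrate the live blocks under a block
 * plug so the resulting I/O is batched.
 */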
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

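/*
 * Entry point for one garbage collection pass. Called with gc_mutex held;
 * it releases the mutex before returning. Background GC escalates to
 * foreground GC when free sections run short, and keeps collecting victims
 * until enough sections have been freed. Returns -1 if no victim could be
 * selected.
 */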
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}