/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;
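	/*
	 * The sleep interval is adaptive: it grows while writes are frozen
	 * or the I/O path is not idle, shrinks when there are plenty of
	 * invalid blocks to reclaim, and falls back to no_gc_sleep_time
	 * when no victim could be selected.
	 */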

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some of those segments may be invalidated again
		 * soon by user updates or deletions. So we wait a while to
		 * collect more dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

#ifdef CONFIG_F2FS_STAT_FS
		sbi->bg_gc++;
#endif

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;
	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}

out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

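/*
 * Pick the cost model for victim selection: by default BG_GC uses the
 * cost-benefit policy and FG_GC uses the greedy policy. A non-zero
 * gc_idle overrides this (1 forces cost-benefit, 2 forces greedy).
 */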
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
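	/*
	 * SSR scans the dirty segments of the requested type one segment
	 * at a time using the greedy cost, while regular (LFS) GC scans
	 * all dirty segments one section at a time using the mode chosen
	 * by select_gc_type().
	 */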
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > MAX_VICTIM_SEARCH)
		p->max_search = MAX_VICTIM_SEARCH;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments that
	 * were chosen by background GC before. Those segments are
	 * guaranteed to have only a few valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
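	/*
	 * Cost-benefit cost: u is the section utilization in percent and
	 * age reflects how long the section has been left unmodified
	 * (0..100, older sections get larger values). The result is
	 * UINT_MAX - (100 * (100 - u) * age) / (100 + u), so old sections
	 * with few valid blocks get the smallest cost and are preferred by
	 * the minimum-cost search in get_victim_by_default().
	 * Example with illustrative numbers: u = 20 and age = 80 give
	 * UINT_MAX - (100 * 80 * 80) / 120 = UINT_MAX - 5333.
	 */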
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time is changed by user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
					struct victim_sel_policy *p)
{
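	/*
	 * SSR ranks candidates by the number of blocks still valid in the
	 * last checkpoint; LFS greedy uses the current valid block count,
	 * and LFS cost-benefit uses get_cb_cost(). Lower cost wins.
	 */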
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == max_cost)
			continue;

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
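/*
 * Data GC keeps every inode it touches on an inode list (ilist), holding
 * one reference per inode so its data pages can be moved in a later phase.
 * put_gc_inode() drops those references once the GC pass is done.
 */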

static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}
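/*
 * Check the current SIT bitmap to see whether the block at @offset within
 * @segno still holds valid data, i.e. it has not been invalidated by a
 * later update or truncation.
 */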

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If the node is still valid, it is written out with cold
 * status; otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;
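	/*
	 * The segment is walked twice: the first pass (initial == true)
	 * only issues readahead for the node pages, and the second pass
	 * dirties the valid ones so they get rewritten to a new location.
	 */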

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE, true);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers only to a
 * direct node block. Passing an offset that points to any other node block
 * type, such as an indirect or double indirect node block, is a caller bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
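	/*
	 * bidx counts only the direct node blocks in front of this one;
	 * the indirect/double indirect nodes subtracted out above ("dec")
	 * hold node ids rather than data addresses.
	 */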
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		f2fs_wait_on_page_writeback(page, DATA, true);

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks whether the block is still valid. If it is, the block is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
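	/*
	 * The victim segment is scanned in four phases (see "++phase < 4"
	 * below): phase 0 readaheads the dnode pages, phase 1 readaheads
	 * the inode pages, phase 2 grabs each valid inode, readaheads its
	 * data page and adds the inode to ilist, and phase 3 locks and
	 * moves the data pages.
	 */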

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino, checking its validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				start_bidx = start_bidx_of_node(nofs,
								F2FS_I(inode));
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

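	/* plug the block layer so the bios issued while collecting this
	 * segment are submitted in batches */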
	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

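/*
 * Entry point for both background and foreground GC. The caller must hold
 * sbi->gc_mutex, which is released here before returning. Returns 0 once a
 * victim has been selected and collected, and non-zero if no victim could
 * be selected.
 */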
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}