/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

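/*
 * Background GC thread: it wakes up every wait_ms milliseconds and, when
 * the filesystem is idle, collects one victim section.  The interval
 * adapts between min_sleep_time and max_sleep_time: it shrinks while
 * invalid blocks are plentiful and grows when the device is busy or
 * there is little garbage to collect.
 */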
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so it is worth waiting a
		 * while to collect more dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

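/*
 * SSR reuses a single dirty segment of the requested type and always
 * searches greedily, while regular LFS-style GC works on whole sections
 * and uses the mode chosen by select_gc_type().
 */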
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

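/*
 * Cost-benefit victim cost, in the spirit of the classic LFS cleaning
 * policy: a section is a good victim when it has few valid blocks (low
 * utilization u, in percent) and has not been modified for a long time
 * (high age, normalized to 0..100).  The benefit/cost ratio
 * age * (100 - u) / (100 + u) is subtracted from UINT_MAX, so a smaller
 * return value means a better victim for the min-cost search.
 */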
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
		if (segno >= MAIN_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

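/*
 * gc_list tracks every inode referenced while moving data blocks: ilist
 * holds one reference per inode until put_gc_inode() drops them after
 * the pass, and iroot (a radix tree keyed by ino) makes the duplicate
 * check in add_gc_inode() cheap.
 */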
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT.  If the node is valid, it is migrated with cold status;
 * otherwise the stale node block is ignored.
 */
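/*
 * The segment is scanned twice: the first pass only issues readahead for
 * every valid node page, the second actually dirties and writes them.
 */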
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index of the data region covered by the given
 * node offset.  Be careful: callers must pass node offsets of direct node
 * blocks only.  Passing the offset of any other node type, such as an
 * indirect or double indirect node block, is a caller's bug.
 */
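/*
 * For example, with the usual f2fs layout, node_ofs == 1 is the first
 * direct node, so its data region starts at ADDRS_PER_INODE(fi);
 * node_ofs == 4 is the first direct node under the first indirect node
 * (node_ofs == 3), giving bidx == 4 - 2 - 0 == 2 and a start index of
 * 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi).
 */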
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

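/*
 * Move an encrypted block without decrypting it: the raw ciphertext is
 * read through META_MAPPING into an encrypted page, a new block address
 * is allocated in the cold data log, and the ciphertext is written back
 * unchanged.
 */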
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	int err;

	/* do not read out */
	page = grab_cache_page(inode->i_mapping, bidx);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR))
		goto put_out;

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.blk_addr = dn.data_blkaddr;

	fio.encrypted_page = grab_cache_page(META_MAPPING(fio.sbi), fio.blk_addr);
	if (!fio.encrypted_page)
		goto put_out;

	f2fs_submit_page_bio(&fio);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
					&fio.blk_addr, &sum, CURSEG_COLD_DATA);
	dn.data_blkaddr = fio.blk_addr;

	/* write page */
	lock_page(fio.encrypted_page);
	set_page_writeback(fio.encrypted_page);
	fio.rw = WRITE_SYNC;
	f2fs_submit_page_mbio(&fio);

	set_data_blkaddr(&dn);
	f2fs_update_extent_cache(&dn);
	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_page(fio.encrypted_page, 1);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

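/*
 * For BG_GC it is enough to mark the page cold and dirty so that normal
 * writeback migrates it later; FG_GC must write the page synchronously
 * so the victim section can be freed right away.
 */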
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.rw = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		f2fs_wait_on_page_writeback(page, DATA);

		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);
		set_cold_data(page);
		do_write_data_page(&fio);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block, and
 * checks that the block is still valid.  If it is, the block is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
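/*
 * The segment is scanned in four phases to pipeline the I/O:
 *   phase 0 - readahead the direct node pages of all valid blocks
 *   phase 1 - readahead the owning inodes' node pages
 *   phase 2 - iget() the inodes and readahead their data pages
 *   phase 3 - actually move the data pages (or raw encrypted blocks)
 */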
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, let's go phase 3 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, READA);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 3 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);
			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	/*
	 * this is to avoid deadlock:
	 * - lock_page(sum_page)         - f2fs_replace_block
	 *  - check_valid_map()            - mutex_lock(sentry_lock)
	 *   - mutex_lock(sentry_lock)     - change_curseg()
	 *                                  - lock_page(sum_page)
	 */
	unlock_page(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
}

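/*
 * Entry point for both background and foreground GC.  Callers hold
 * sbi->gc_mutex; it is released here before returning.  When free
 * sections run low, a BG_GC request is upgraded to FG_GC, and a
 * checkpoint is written first so that prefree segments can be reclaimed.
 */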
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi)))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, &cpc);
	}

	if (!__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
								META_SSA);

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &gc_list, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, &cpc);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}