// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so wait a while to let
		 * dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

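		/*
		 * Adjust the polling interval: shrink it when plenty of
		 * invalid blocks have accumulated (GC will soon be
		 * worthwhile), otherwise stretch it to stay out of the way.
		 */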
		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victims that
	 * background GC selected before. Those victims are guaranteed
	 * to have only a few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

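	/*
	 * Cost-benefit victim selection: u is the utilization in percent and
	 * age is normalized to 0..100, so the benefit term
	 * (100 - u) * age / (100 + u) grows for old, mostly-invalid victims;
	 * subtracting it from UINT_MAX turns the best candidate into the
	 * smallest cost.
	 */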
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum number of valid blocks and removes it from
 * the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
					get_ckpt_valid_blocks(sbi, segno)))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If the address is valid, the node is migrated with cold
 * status; otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

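	/*
	 * Three passes over the segment's summary entries: phase 0 readaheads
	 * NAT blocks, phase 1 readaheads the node pages, and phase 2 actually
	 * migrates the valid node blocks.
	 */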
next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index that the given node offset indicates.
 * Be careful: the caller must pass only node offsets that indicate direct
 * node blocks. Passing a node offset that points to another type of node
 * block, such as an indirect or double indirect node block, is a caller's
 * bug.
 */
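/*
 * Note: an f2fs inode addresses its blocks through two direct node blocks
 * (node offsets 1 and 2), two indirect node blocks and one double indirect
 * node block, which is why the conversion below subtracts the indirect node
 * blocks ('dec') that precede a given direct-node offset.
 */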
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

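/*
 * Read one data block into META_MAPPING ahead of time, so that a later
 * move_data_block() on a post-read-required (e.g. encrypted) inode can find
 * its contents already cached.
 */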
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC))) {
		err = -EFAULT;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * Don't cache encrypted data in the meta inode until previously
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);
	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
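/*
 * Overall flow: pin the data page, look up its dnode, allocate a new COLD
 * data block, copy the old block's contents through META_MAPPING, write it
 * out to the new address and finally update the dnode entry with the new
 * block address (rolling back on failure).
 */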
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * Don't cache encrypted data in the meta inode until previously
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
	if (mpage) {
		bool updated = false;

		if (PageUptodate(mpage)) {
			memcpy(page_address(fio.encrypted_page),
					page_address(mpage), PAGE_SIZE);
			updated = true;
		}
		f2fs_put_page(mpage, 1);
		invalidate_mapping_pages(META_MAPPING(fio.sbi),
					fio.old_blkaddr, fio.old_blkaddr);
		if (updated)
			goto write_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

write_page:
	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is modified.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

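	/*
	 * Five passes over the segment's summary entries: phases 0-2 issue
	 * readahead for NAT blocks, node pages and inode pages, phase 3
	 * grabs the inodes and prefetches the data pages, and phase 4
	 * moves the valid data blocks.
	 */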
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino and check its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

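	/*
	 * Two passes over the victim segments: first take a reference on
	 * every summary page of the section (and unlock it), then migrate
	 * the segments one by one; see the deadlock note further down for
	 * why the summary page is not kept locked.
	 */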
	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (__is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
		migrated++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint. Then,
		 * we secure free segments which don't need FG_GC any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

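	/*
	 * If not enough sections were freed, FG_GC normally retries; but when
	 * most rounds were skipped because of atomic files or a contended
	 * i_gc_rwsem, the atomic files' in-memory pages are dropped before
	 * one more retry, and failing that a checkpoint is written to reclaim
	 * the prefree segments produced so far.
	 */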
	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
						sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give the warm/cold data area from the slower device */
	if (sbi->s_ndevs && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}