// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to let
		 * dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

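/*
 * Illustrative timeline for the sleep-time adaptation above (a sketch,
 * assuming the default tunables from gc.h: min_sleep_time 30s,
 * max_sleep_time 60s, no_gc_sleep_time 300s, urgent_sleep_time 500ms):
 *
 *  - many invalid blocks accumulate   -> decrease_sleep_time() walks
 *    wait_ms down toward 30s, so background GC runs more often;
 *  - device busy or few invalid blocks -> increase_sleep_time() walks
 *    wait_ms up toward 60s;
 *  - no victim could be selected      -> wait_ms jumps to 300s;
 *  - gc_mode == GC_URGENT             -> wait_ms is pinned to 500ms and
 *    the idle checks are skipped via the "goto do_gc" path.
 */
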
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

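/*
 * Worked example (illustrative numbers): with 512 blocks per segment
 * and one segment per section (ofs_unit == 1), greedy mode starts from
 * a max cost of 2 * 512 * 1 = 1024. A candidate's greedy cost is its
 * valid-block count, which is at most 512, so every dirty segment
 * compares below the initial p->min_cost and remains selectable.
 */
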
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * already chosen by background GC.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

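/*
 * Worked example of the cost-benefit value above (illustrative
 * numbers): a section with u = 20 (20% of its blocks still valid) and
 * age = 80 gives a benefit of 100 * (100 - 20) * 80 / (100 + 20) =
 * 5333, i.e. a cost of UINT_MAX - 5333. A fuller section with u = 60
 * and the same age gives 100 * 40 * 80 / 160 = 2000, i.e. a larger
 * cost. Victim selection keeps the minimum cost, so the older, emptier
 * section wins, matching the classic cost-benefit policy from LFS.
 */
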
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
					get_ckpt_valid_blocks(sbi, segno)))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

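/*
 * Design note: gc_list tracks each inode twice on purpose. The radix
 * tree (iroot) gives fast ino -> inode lookup in find_gc_inode(), so an
 * inode already pinned in an earlier phase is reused instead of being
 * looked up with f2fs_iget() again; the list (ilist) simply remembers
 * every pinned inode so that put_gc_inode() can iput() each of them.
 */
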
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If the node is valid, it is copied with cold
 * status; otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

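/*
 * The three phases above form a readahead pipeline: phase 0 prefetches
 * the NAT blocks covering every valid entry, phase 1 prefetches the
 * node pages themselves, and only phase 2 actually migrates the
 * blocks. Each pass walks the same summary entries, so the I/O needed
 * by pass N + 1 has already been queued during pass N.
 */
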
/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers only to
 * direct node blocks. Passing a node offset that points to any other
 * type of node block, such as an indirect or double indirect node
 * block, is a caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

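/*
 * Worked example (a sketch; with 4KB blocks, NIDS_PER_BLOCK and
 * ADDRS_PER_BLOCK are both 1018):
 *  - node_ofs == 1 (first direct node)  -> bidx = 0, so the start
 *    block index is ADDRS_PER_INODE(inode);
 *  - node_ofs == 2 (second direct node) -> bidx = 1;
 *  - node_ofs == 4 (first child of the indirect node at offset 3)
 *    -> dec = 0, bidx = 4 - 2 - 0 = 2.
 * Offsets like 0 (the inode itself) or 3 (an indirect node) must never
 * be passed in, per the comment above.
 */
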
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC))) {
		err = -EFAULT;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);
	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
	if (mpage) {
		bool updated = false;

		if (PageUptodate(mpage)) {
			memcpy(page_address(fio.encrypted_page),
					page_address(mpage), PAGE_SIZE);
			updated = true;
		}
		f2fs_put_page(mpage, 1);
		invalidate_mapping_pages(META_MAPPING(fio.sbi),
					fio.old_blkaddr, fio.old_blkaddr);
		if (updated)
			goto write_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

write_page:
	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of the victim data block, and
 * checks the data block's validity. If the block is valid, it is copied with
 * cold status and the parent node is modified.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary page */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint. Then,
		 * we secure free segments which don't need FG_GC any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
						sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

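/*
 * Usage notes (a summary of the control flow above, not a new API):
 * the background thread calls
 * f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO). With
 * sync == true a single FG_GC round is performed and -EAGAIN is
 * returned when no section could be freed; with sync == false the
 * function may loop via "goto gc_more" until enough free sections
 * exist or too many rounds were skipped.
 */
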
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}