/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;

#ifdef CONFIG_F2FS_FAULT_INJECTION
u32 f2fs_fault_rate = 0;
atomic_t f2fs_ops;

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
};
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_noheap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_flush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_fault_injection,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_err, NULL},
};

/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
};

struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
};

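/*
 * Map a sysfs attribute's struct_type to the base address of the in-memory
 * structure that actually holds the tunable field.
 */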
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
	return NULL;
}

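/*
 * Total kilobytes written to the block device: the value saved at the last
 * checkpoint plus whatever the block layer has accounted since mount.
 */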
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
			BD_PART_WRITTEN(sbi)));
}

static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}

static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
	*ui = t;
	return count;
}

static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}

static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
									s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}

static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}

#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)

F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);

#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};

static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

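/* Parse the comma-separated mount option string and set the matching flags in sbi. */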
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_fault_rate = 0;
#endif
	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_fault_rate = arg;
			atomic_set(&f2fs_ops, 0);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	if (test_opt(F2FS_SB(sb), INLINE_XATTR))
		set_inode_flag(fi, FI_INLINE_XATTR);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;
	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode, true);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		return 0;
	}
	return generic_drop_inode(inode);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip doing checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	if (get_pages(sbi, F2FS_WRITEBACK))
		f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}

static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}

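/*
 * Like segment_info above, but additionally dumps each segment's current
 * valid-block bitmap, one segment per line.
 */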
static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i, j;

	seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u|", se->type,
					get_valid_blocks(sbi, i, 1));
		for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
			seq_printf(seq, "%x ", se->cur_valid_map[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}

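/* single_open() wrappers for the seq_file show functions above */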
#define F2FS_PROC_FILE_DEF(_name)					\
static int _name##_open_fs(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, _name##_seq_show, PDE_DATA(inode));	\
}									\
									\
static const struct file_operations f2fs_seq_##_name##_fops = {		\
	.owner = THIS_MODULE,						\
	.open = _name##_open_fs,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
	.release = single_release,					\
};

F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);

static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, EXTENT_CACHE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}

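/*
 * Remount: re-parse options on top of the defaults, then start or stop the
 * GC and flush-merge threads to match the new read-only state and options.
 */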
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}

static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static struct fscrypt_operations f2fs_cryptops = {
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

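/*
 * Largest file size in blocks: the block pointers inside the inode plus
 * two direct, two indirect and one double-indirect node block.
 */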
static loff_t max_file_blocks(void)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}

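/*
 * Copy the in-memory super block into the buffer (when one is given) and
 * write it out synchronously with FUA so it lands on the device.
 */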
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
}

static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}

static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
}

/*
 * Read f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either one is broken, we pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Failed to read any of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}

int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}

static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc) {
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);
		proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_bits_fops, sb);
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_kobj;
		}
	}

	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted as read-only then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %ld",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_shrinker(&f2fs_shrinker_info);
	unregister_filesystem(&f2fs_fs_type);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	kset_unregister(f2fs_kset);
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");