// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

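/*
 * A sketch of a truncate-style caller following the ordering above
 * (illustrative only; the real callers live in the truncate paths):
 *
 *	sb_start_write(sb);
 *	inode_lock(inode);				(i_mutex)
 *	down_write(&EXT4_I(inode)->i_mmap_sem);
 *	handle = ext4_journal_start(inode, ...);	(transaction start)
 *	down_write(&EXT4_I(inode)->i_data_sem);
 */
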
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}
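
/*
 * Minimal usage sketch (illustrative, not a call site in this file):
 * recompute the checksum after updating in-core superblock fields, and
 * re-verify it when the superblock is read back:
 *
 *	ext4_superblock_csum_set(sb);
 *	mark_buffer_dirty(EXT4_SB(sb)->s_sbh);
 *	...
 *	if (!ext4_superblock_csum_verify(sb, es))
 *		ext4_msg(sb, KERN_ERR, "superblock checksum mismatch");
 */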

void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}
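
/*
 * These helpers back large, possibly physically non-contiguous
 * allocations; a typical use (sketch) is sizing per-group tables, e.g.:
 *
 *	sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
 *	if (!sbi->s_flex_groups)
 *		return -ENOMEM;
 */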

ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
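
/*
 * Sketch of the lo/hi split the accessors above implement (assumes a
 * 64-bit group descriptor, i.e. EXT4_DESC_SIZE(sb) >=
 * EXT4_MIN_DESC_SIZE_64BIT):
 *
 *	ext4_fsblk_t blk = 0x123456789ULL;
 *
 *	ext4_block_bitmap_set(sb, bg, blk);
 *	    bg->bg_block_bitmap_lo == cpu_to_le32(0x23456789)
 *	    bg->bg_block_bitmap_hi == cpu_to_le32(0x00000001)
 *	    ext4_block_bitmap(sb, bg) == blk
 */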

static void __save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}
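
/*
 * Entries on t_private_list are queued while a handle is open, e.g.
 * (sketch, assuming a caller that embeds an ext4_journal_cb_entry):
 *
 *	ext4_journal_callback_add(handle, my_callback, &entry->jce);
 *
 * my_callback() is then invoked by the loop above once the transaction
 * commits (or with an error code if the journal was aborted).
 */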

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (sb_rdonly(sb))
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}
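
/*
 * Typical reporting pattern (sketch; the check itself is made up):
 *
 *	if (le32_to_cpu(es->s_inodes_count) == 0) {
 *		ext4_error(sb, "bogus inode count");
 *		return -EFSCORRUPTED;
 *	}
 *
 * ext4_error() is the wrapper macro that supplies __func__ and
 * __LINE__ to __ext4_error() above.
 */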

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}
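
/*
 * Sketch of a typical caller: funnel an error returned by a jbd2
 * helper through the ext4_std_error() wrapper macro, which supplies
 * __func__/__LINE__ to the function above:
 *
 *	err = ext4_journal_get_write_access(handle, bh);
 *	if (err)
 *		ext4_std_error(sb, err);
 */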

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}
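
/*
 * Callers normally go through the ext4_msg() macro, e.g. (sketch):
 *
 *	ext4_msg(sb, KERN_INFO, "mounted filesystem with ordered data mode");
 */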

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_unregister_sysfs(sb);
	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb, 1);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
	kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * have been hotswapped, and it breaks the `ro-after' testing
		 * code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_da_metadata_calc_last_lblock = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext4_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget_normal(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
		ext4_set_inode_flags(inode);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

static bool ext4_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static unsigned ext4_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
		EXT4_NAME_LEN;
}

static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.dummy_context		= ext4_dummy_context,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= ext4_max_namelen,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};

static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}
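
/*
 * Example (illustrative): "mount -o sb=131072 ..." makes this helper
 * return 131072.  The value is given in 1k units, so on a 4k-block
 * filesystem it selects the backup superblock at logical block 32768,
 * the start of block group 1, instead of the primary one.
 */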

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int ret = -1;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{

	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif
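
/*
 * Illustrative mount line exercising the helpers above (the file names
 * are just the conventional ones):
 *
 *	mount -o usrjquota=aquota.user,grpjquota=aquota.group,jqfmt=vfsv1 \
 *		/dev/sdb1 /mnt
 *
 * An empty value ("usrjquota=") is routed to clear_qf_name() instead.
 */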

1541 1542 1543 1544 1545 1546
#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
1547
#ifdef CONFIG_QUOTA
1548 1549 1550 1551 1552
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
1553
#endif
1554
#define MOPT_DATAJ	0x0080
1555 1556 1557
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
1558
#define MOPT_STRING	0x0400
1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_err, 0, 0}
};
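
/*
 * Illustrative note (editorial, not from the original source): each parsed
 * token is matched against this table, and MOPT_SET/MOPT_CLEAR decide the
 * polarity.  For example, a hypothetical mount string
 *
 *	mount -o nodelalloc,errors=panic,commit=30 /dev/sdb1 /mnt
 *
 * would clear EXT4_MOUNT_DELALLOC, set EXT4_MOUNT_ERRORS_PANIC (clearing the
 * other errors= bits via MOPT_CLEAR_ERR), and pass 30 through the MOPT_GTE0
 * integer path into sbi->s_commit_interval.
 */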

static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= SB_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= SB_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~SB_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
			 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	return 1;
}
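
/*
 * Illustrative sketch (editorial, not from the original source): the caller
 * hands parse_options() the raw option string from mount(2); strsep() splits
 * it on commas and each piece goes through handle_mount_opt().  Assuming a
 * mount such as
 *
 *	mount -o journal_ioprio=3,data=writeback /dev/sdc1 /mnt
 *
 * the first token raises *journal_ioprio to
 * IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3) and the second switches the
 * DATA_FLAGS bits to EXT4_MOUNT_WRITEBACK_DATA.
 */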

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	ext4_show_quota_options(seq, sb);
	return 0;
}
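
/*
 * Illustrative example (editorial): with nodefs == 0 only non-default options
 * are emitted, so a /proc/mounts line might read roughly
 *
 *	/dev/sda2 /home ext4 rw,stripe=32,data=ordered 0 0
 *
 * whereas ext4_seq_options_show() below passes nodefs == 1 and separates each
 * option with '\n' instead of ','.  The exact line depends on the mount and
 * on VFS-level flags that the VFS itself appends.
 */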

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		res = SB_RDONLY;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return res;
}
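
/*
 * Illustrative example (editorial): with the "debug" mount option set, the
 * printk above produces a line of roughly this shape (values invented):
 *
 *	[EXT4 FS bs=4096, gc=64, bpg=32768, ipg=8192, mo=0800, mo2=0000]
 */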

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}
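
/*
 * Worked example (editorial): with s_log_groups_per_flex == 4, ngroup == 100
 * gives size = ext4_flex_group(sbi, 99) + 1 = 7 entries; the allocation is
 * then rounded up with roundup_pow_of_two(7 * sizeof(struct flex_groups)),
 * so later growth up to the next power of two reuses the same array.
 */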

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}

static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}
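
/*
 * Layout sketch (editorial): both checksum flavours cover the descriptor with
 * its bg_checksum field logically zeroed.  For metadata_csum the stream is
 *
 *	csum = crc32c(s_csum_seed, le32 group number)
 *	csum = crc32c(csum, gdp bytes up to bg_checksum)
 *	csum = crc32c(csum, 16-bit zero in place of bg_checksum)
 *	csum = crc32c(csum, rest of the descriptor, if s_desc_size allows)
 *
 * and the low 16 bits of the result are stored; the legacy gdt_csum path
 * feeds crc16 with the fs UUID, the group number and the descriptor instead.
 */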

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old style */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor. We do store extents in the
 * form of starting block and length, hence the resulting length of the
 * extent covering maximum file size must fit into on-disk format
 * containers as well. Given that the stored length is always one unit
 * bigger than the maximum unit (because we count 0 as well), we have to
 * lower s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * CONFIG_LBDAF not being enabled implies that the inode
		 * i_blocks field represents the total blocks in 512-byte
		 * units; 32 == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
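
/*
 * Worked example (editorial): for a 4KiB block size (blkbits == 12) with
 * has_huge_files set, res = ((1LL << 32) - 1) << 12, i.e. just under 2^44
 * bytes (~16 TiB), which is then clamped against MAX_LFS_FILESIZE and, on
 * 32-bit i_blocks configurations, against the smaller upper_limit above.
 */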

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
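
/*
 * Worked example (editorial): for bits == 12 the addressable data blocks are
 * EXT4_NDIR_BLOCKS + 2^10 + 2^20 + 2^30 (direct, single, double and triple
 * indirect), a little over 4 TiB once shifted by the block size; the i_blocks
 * sector limit (2^48 - 1, minus the indirect metadata blocks) is what
 * actually caps huge_file systems.
 */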

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}
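
/*
 * Illustrative example (editorial): without meta_bg (or for nr below
 * s_first_meta_bg), backup descriptor nr simply follows the superblock copy,
 * at logical_sb_block + nr + 1.  With meta_bg and a 4KiB block size
 * (s_desc_per_block == 128), descriptor block nr == 2 lives in group 256,
 * right after that group's superblock backup if it has one.
 */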

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * The allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}
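
/*
 * Illustrative example (editorial): given a hypothetical superblock with
 * s_raid_stride == 16 and s_raid_stripe_width == 64, and no stripe= mount
 * option, this returns 64 (stripe_width wins over stride); a nonsensical
 * width of 1 would be squashed to 0, disabling the striped allocator paths.
 */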

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
	if (ext4_has_feature_project(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with project quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}
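
/*
 * Illustrative example (editorial): on a filesystem with logged errors the
 * timer above emits something of this shape once a day (values invented):
 *
 *	EXT4-fs (sda1): error count since last fsck: 4
 *	EXT4-fs (sda1): initial error at time 1518000000: ext4_mb_generate_buddy:757
 *	EXT4-fs (sda1): last error at time 1518086400: ext4_validate_block_bitmap:406: inode 12: block 8193
 */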

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}

/*
 * Remove lr_request from the list_request and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where ext4lazyinit thread lives. It walks
 * through the request list searching for next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute next schedule time of
 * the request. When walking through the list is complete, compute
 * next waking time and put itself into sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it make sense to run itable init. thread or not.
 * If there is at least one uninitialized inode table, return
 * corresponding group number, else the loop goes through all
 * groups and return total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
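
/*
 * Summary sketch (editorial): the resulting jbd2 feature bits are
 *
 *	metadata_csum fs		-> CSUM_V3 incompat
 *	older fs			-> COMPAT_CHECKSUM (v1)
 *	+ journal_async_commit option	-> also ASYNC_COMMIT incompat
 *	journal_checksum only		-> checksum bits, ASYNC_COMMIT cleared
 *	neither option			-> ASYNC_COMMIT cleared, ret stays 1
 */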

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across with a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
3300 3301 3302 3303 3304
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
3305
		}
3306 3307 3308
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}
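
/*
 * Worked example for the non-bigalloc fast path above (illustrative
 * numbers, assuming 4KiB blocks, 8192 inodes per group and 256-byte
 * inodes): a group carrying a superblock backup contributes 1
 * superblock block + ext4_bg_num_gdb() descriptor blocks + 512 inode
 * table blocks (8192 inodes / 16 per block) + 2 bitmap blocks (one
 * block bitmap, one inode bitmap) -- the "+ 2" in the formula.
 */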

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path.  In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}
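
/*
 * Worked example (illustrative): a 1TiB filesystem with 4KiB clusters
 * has 268435456 clusters; 2% of that is 5368709, which is larger than
 * 4096, so the reservation is capped at 4096 clusters (16MiB).
 */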

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sb_block;
	}
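
	/*
	 * Illustrative arithmetic for the branch above: the primary
	 * superblock lives at byte offset 1024.  With the default
	 * sb_block == 1 and a 4KiB device block size, logical_sb_block
	 * becomes (1 * 1024) / 4096 == 0 and offset becomes 1024, so the
	 * superblock is read out of the middle of device block 0.
	 */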

	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
		ret = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto failed_mount;
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext[34] filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext4 filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
		err = bdev_dax_supported(sb->s_bdev, blocksize);
		if (err) {
			ext4_msg(sb, KERN_ERR,
				"DAX unsupported by block device. Turning off DAX.");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (le32_to_cpu(es->s_log_cluster_size) >
		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid log cluster size: %u",
				 le32_to_cpu(es->s_log_cluster_size));
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_warning(sb, "fragment/cluster size (%d) != "
				     "block size (%d)", clustersize,
				     blocksize);
			clustersize = blocksize;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;
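
	/*
	 * Illustrative example: a bigalloc filesystem with 4KiB blocks and
	 * 64KiB clusters ends up with s_cluster_bits == 4 and
	 * s_cluster_ratio == 16, so every allocation is rounded up to a
	 * 16-block cluster.
	 */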

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
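	/*
	 * Worked example (illustrative): with 4KiB blocks there are 32768
	 * blocks per group, so a 1TiB filesystem (268435456 blocks) ends
	 * up with 8192 block groups after the round-up division above.
	 */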
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = kvmalloc(db_count *
					  sizeof(struct buffer_head *),
					  GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sb_breadahead(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	sbi->s_gdb_count = db_count;

	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			set_opt(sb, ORDERED_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
		} else {
			set_opt(sb, JOURNAL_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
		}
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			"journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	if (ext4_setup_super(sb, es, sb_rdonly(sb)))
		sb->s_flags |= SB_RDONLY;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
	    sbi->s_want_extra_isize == 0) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
	}
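
	/*
	 * Illustrative numbers for the defaults above: with 256-byte
	 * on-disk inodes, EXT4_GOOD_OLD_INODE_SIZE is 128 and
	 * sizeof(struct ext4_inode) is 160 in kernels of this vintage, so
	 * s_want_extra_isize defaults to 32 bytes, leaving the rest of the
	 * inode body available for in-inode extended attributes.
	 */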

	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
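	/*
	 * Illustrative arithmetic: the journal device carries its own ext4
	 * superblock at byte 1024, so with a 4KiB filesystem block size
	 * sb_block is 0 and offset is 1024 within that block.
	 */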
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
		       "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
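	/*
	 * Reader's note on the save/restore below: journal replay may
	 * rewrite the on-disk superblock with an older copy, so the
	 * error-tracking region between EXT4_S_ERR_START and
	 * EXT4_S_ERR_END is preserved across jbd2_journal_load().
	 */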
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}

static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
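	/*
	 * Reader's note on the shift above: part_stat_read() counts
	 * 512-byte sectors, so ">> 1" converts the sectors written since
	 * mount into KiB before folding them into the running total.
	 */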
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so the barrier
	 * must be sent at the end of the function.  But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem to a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}
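
/*
 * Note: LVM does not call ->freeze_fs/->unfreeze_fs directly; they are
 * reached through the VFS, e.g. freeze_super()/thaw_super() from the
 * FIFREEZE/FITHAW ioctls (fsfreeze(8)) or freeze_bdev() from device-mapper.
 */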

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				"journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			"dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & SB_LAZYTIME)
		sb->s_flags |= SB_LAZYTIME;

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
		ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}
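
/*
 * Note on the save/restore pattern above: every option parse_options() may
 * mutate has a shadow copy in struct ext4_mount_options, so any failure can
 * unwind to the exact pre-remount state, including the kstrdup()ed
 * journaled-quota file names.
 */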

#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
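
/*
 * Example of the clamping in ext4_statfs_project(), with illustrative
 * numbers: on a 4KiB-block fs (s_blocksize_bits == 12) with a 1GiB project
 * block soft limit, limit = (1 << 30) >> 12 = 262144 blocks; if the project
 * has 100000 blocks charged, statfs in that tree reports f_blocks = 262144
 * and f_bfree = f_bavail = 162144.
 */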

static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;

	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}
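
/*
 * Note: f_fsid above folds the 16-byte superblock UUID into 64 bits by
 * XORing its two halves, then splits the result into the two 32-bit words
 * userspace sees from statfs(2).
 */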


#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}
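
/*
 * The same start-handle / quota-op / stop-handle shape repeats below for
 * acquire, release and info writes; only the credit estimate differs
 * (EXT4_QUOTA_TRANS_BLOCKS vs. _INIT_ vs. _DEL_).  Sketch:
 *
 *	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, credits);
 *	ret = dquot_<op>(...);		(may take dqio_sem internally)
 *	err = ext4_journal_stop(handle);
 */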

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}
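
/*
 * For illustration: this path is reached from the Q_QUOTAON quotactl (e.g.
 * quotaon(8)) when quota files are visible files named via the
 * usrjquota=/grpjquota= mount options; filesystems with the "quota"
 * feature use the hidden quota inodes via ext4_enable_quotas() instead.
 */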

static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}

static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * a write must not cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}
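
/*
 * Example of the alignment check above, with illustrative numbers: on a
 * 4KiB-block fs, a write of len = 256 at off = 4000 gives offset = 4000 and
 * blocksize - offset = 96 < 256, so it would span two blocks and is
 * rejected with -EIO rather than overrun the single-block credit.
 */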

static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops	*ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}
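
/*
 * Note: with the registrations above, "mount -t ext2" or "mount -t ext3"
 * is served by ext4 (the ext2 case only with CONFIG_EXT4_USE_FOR_EXT2),
 * and ext2/ext3_feature_set_ok() refuse any feature set those older
 * on-disk formats never defined.
 */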

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}
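
/*
 * Note on the unwind above: the outN labels tear down in strict reverse
 * order of initialization, so a failure at any step undoes exactly the
 * steps that already succeeded.
 */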

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)