// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)


static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
				  bh_end_io_t *end_io)
{
	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ, op_flags, bh);
}

void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
			 bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io);
}

int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
{
	if (trylock_buffer(bh)) {
		if (wait)
			return ext4_read_bh(bh, op_flags, NULL);
		ext4_read_bh_nowait(bh, op_flags, NULL);
		return 0;
	}
	if (wait) {
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return 0;
		return -EIO;
	}
	return 0;
}

/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block, int op_flags,
					       gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				   int op_flags)
{
	return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
}

struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	return __ext4_sb_bread_gfp(sb, block, 0, 0);
}

void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);

	if (likely(bh)) {
		ext4_read_bh_lock(bh, REQ_RAHEAD, false);
		brelse(bh);
	}
}

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
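
/*
 * The checksum computed below with ext4_chksum() covers the on-disk
 * superblock up to, but not including, the s_checksum field itself
 * (hence the offsetof(struct ext4_super_block, s_checksum) length).
 */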

static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}
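
/*
 * In the block group descriptor, block numbers and counters wider than the
 * on-disk _lo field are split into _lo/_hi halves; the _hi half is only
 * present when the descriptor size is at least EXT4_MIN_DESC_SIZE_64BIT,
 * which is what the accessors below test before folding it in.
 */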

ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
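
/*
 * The superblock timestamps handled below are stored on disk as a 32-bit
 * low word plus an 8-bit high byte, i.e. a 40-bit seconds counter, which
 * is why the value is clamped to (1ull << 40) - 1 before being split.
 */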

static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages and calls
 * clear_page_dirty_for_io(), which is what we want in order to
 * write-protect the pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct page *page,
					      struct writeback_control *wbc,
					      void *data)
{
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = page_buffers(page);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			redirty_page_for_writepage(wbc, page);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	return AOP_WRITEPAGE_ACTIVATE;
}

static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
        };

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}

static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = jbd2_journal_submit_inode_data_buffers(jinode);

	return ret;
}

static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

struct ext4_err_translation {
	int code;
	int errno;
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }

static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};

static int ext4_errno_to_code(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
		if (err_translation[i].errno == errno)
			return err_translation[i].code;
	return EXT4_ERR_UNKNOWN;
}

static void __save_error_info(struct super_block *sb, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}

static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	__save_error_info(sb, error, ino, block, func, line);
	if (!bdev_read_only(sb->s_bdev))
624
		ext4_commit_super(sb);
}

627 628 629 630
/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
631
 * superblock.  That is not possible on ext4, because we may have other
632 633 634 635 636
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
637
 * We'll just use the jbd2_journal_abort() error code to record an error in
638
 * the journal instead.  On recovery, the journal will complain about
639
 * that error until we've noted it down and cleared it.
640 641 642 643 644 645
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic in which case we take the easy way out and panic immediately. This is
 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
 * at a critical moment in log management.
646
 */
647 648 649
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
650
{
651 652
	journal_t *journal = EXT4_SB(sb)->s_journal;

653
	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
654 655 656
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

657 658 659
	if (!bdev_read_only(sb->s_bdev))
		save_error_info(sb, error, ino, block, func, line);

660
	if (sb_rdonly(sb) || (!force_ro && test_opt(sb, ERRORS_CONT)))
661 662
		return;

663 664 665
	ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
	if (journal)
		jbd2_journal_abort(journal, -EIO);
	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
671
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
672
		panic("EXT4-fs (device %s): panic forced after error\n",
673
			sb->s_id);
674
	}
	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * Make sure updated value of ->s_mount_flags will be visible before
	 * ->s_flags update
	 */
	smp_wmb();
	sb->s_flags |= SB_RDONLY;
682 683
}

static void flush_stashed_error_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
						s_error_work);

689
	ext4_commit_super(sbi->s_sb);
690 691
}

692 693 694 695
#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

696
void __ext4_error(struct super_block *sb, const char *function,
697
		  unsigned int line, bool force_ro, int error, __u64 block,
698
		  const char *fmt, ...)
699
{
	struct va_format vaf;
701 702
	va_list args;

703 704 705
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

706
	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
716
	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
717 718
}

719
void __ext4_error_inode(struct inode *inode, const char *function,
720
			unsigned int line, ext4_fsblk_t block, int error,
721
			const char *fmt, ...)
722 723
{
	va_list args;
724
	struct va_format vaf;
725

726 727 728
	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

729
	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
746 747
	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
748 749
}

750 751 752
void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
753 754
{
	va_list args;
755
	struct va_format vaf;
	struct inode *inode = file_inode(file);
757 758
	char pathname[80], *path;

759 760 761
	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

762
	trace_ext4_error(inode->i_sb, function, line);
763
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
784 785
	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
786 787
}

788 789
const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
790 791 792 793
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
807 808
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

828
/* __ext4_std_error decodes expected errors from journaling functions
829 830
 * automatically and invokes the appropriate error response.  */

831 832
void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
833 834 835 836
{
	char nbuf[16];
	const char *errstr;

837 838 839
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

840 841 842
	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
843
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
844 845
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}
851

852
	ext4_handle_error(sb, false, -errno, 0, 0, function, line);
853 854
}

855 856
void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
857
{
	struct va_format vaf;
859 860
	va_list args;

861
	atomic_inc(&EXT4_SB(sb)->s_msg_count);
862 863 864
	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

865
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
869 870 871
	va_end(args);
}

static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}
878

879
void __ext4_warning(struct super_block *sb, const char *function,
880
		    unsigned int line, const char *fmt, ...)
881
{
	struct va_format vaf;
883 884
	va_list args;

885
	if (!ext4_warning_ratelimit(sb))
886 887
		return;

888
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
893 894 895
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

914 915 916 917
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
918 919 920
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
922 923
	va_list args;

924 925 926
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

927
	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}
942 943

	if (test_opt(sb, ERRORS_CONT)) {
944 945
		if (test_opt(sb, WARN_ON_ERROR))
			WARN_ON_ONCE(1);
946
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
947
		__save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
948 949
		if (!bdev_read_only(sb->s_bdev))
			schedule_work(&EXT4_SB(sb)->s_error_work);
950 951 952
		return;
	}
	ext4_unlock_group(sb, grp);
953
	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				     ext4_group_t group,
				     unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
984 985
	}

986 987 988 989
	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

999
void ext4_update_dynamic_rev(struct super_block *sb)
1000
{
1001
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1002

1003
	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
1004 1005
		return;

1006
	ext4_warning(sb,
1007 1008
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
1009
		     EXT4_DYNAMIC_REV);
1010

1011 1012 1013
	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
1027
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
1028 1029 1030
{
	struct block_device *bdev;

1031
	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR,
		 "failed to open journal device unknown-block(%u,%u) %ld",
		 MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
1047
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1049 1050
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
1052 1053
{
	struct block_device *bdev;
1054
	bdev = sbi->s_journal_bdev;
1055
	if (bdev) {
		ext4_blkdev_put(bdev);
1057
		sbi->s_journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
1063
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
1064 1065
}

1066
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
1067 1068 1069
{
	struct list_head *l;

1070 1071
	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

1091 1092 1093
	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
1094
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

1113
static void ext4_put_super(struct super_block *sb)
1114
{
1115 1116
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
1117
	struct buffer_head **group_desc;
1118
	struct flex_groups **flex_groups;
1119
	int aborted = 0;
1120
	int i, err;
1121

1122
	ext4_unregister_li_request(sb);
1123
	ext4_quota_off_umount(sb);
1124

1125
	flush_work(&sbi->s_error_work);
1126
	destroy_workqueue(sbi->rsv_conversion_wq);
1127

	/*
	 * Unregister sysfs before destroying jbd2 journal.
	 * Since we could still access attr_journal_task attribute via sysfs
	 * path which could have sbi->s_journal->j_task as NULL
	 */
	ext4_unregister_sysfs(sb);

1135
	if (sbi->s_journal) {
1136
		aborted = is_journal_aborted(sbi->s_journal);
1137 1138
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
1139
		if ((err < 0) && !aborted) {
1140
			ext4_abort(sb, -err, "Couldn't clean up the journal");
1141
		}
1142
	}
1143

1144
	ext4_es_unregister_shrinker(sbi);
1145
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

1150
	if (!sb_rdonly(sb) && !aborted) {
1151
		ext4_clear_feature_journal_needs_recovery(sb);
1152 1153
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
1154
	if (!sb_rdonly(sb))
1155
		ext4_commit_super(sb);
1156

1157 1158
	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
1159
	for (i = 0; i < sbi->s_gdb_count; i++)
1160 1161
		brelse(group_desc[i]);
	kvfree(group_desc);
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
1168
	rcu_read_unlock();
1169
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
1170 1171
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
1172
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
1173
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
1174
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
1176
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

1187
	sync_blockdev(sb->s_bdev);
1188
	invalidate_bdev(sb->s_bdev);
1189
	if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may be
		 * hotswapped, and it breaks the `ro-after' testing code.
		 */
1195 1196
		sync_blockdev(sbi->s_journal_bdev);
		invalidate_bdev(sbi->s_journal_bdev);
1197
		ext4_blkdev_remove(sbi);
1198
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

1206 1207
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
1208
	brelse(sbi->s_sbh);
1209
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
1216 1217
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
1218
	kfree(sbi->s_blockgroup_lock);
1219
	fs_put_dax(sbi->s_daxdev);
1220
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
1221
#ifdef CONFIG_UNICODE
1222
	utf8_unload(sb->s_encoding);
1223
#endif
	kfree(sbi);
}

1227
static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
1232
static struct inode *ext4_alloc_inode(struct super_block *sb)
1233
{
1234
	struct ext4_inode_info *ei;
1235

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
1237 1238
	if (!ei)
		return NULL;
1239

	inode_set_iversion(&ei->vfs_inode, 1);
1241
	spin_lock_init(&ei->i_raw_lock);
1242
	INIT_LIST_HEAD(&ei->i_prealloc_list);
1243
	atomic_set(&ei->i_prealloc_active, 0);
1244
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
1247
	INIT_LIST_HEAD(&ei->i_es_list);
1248
	ei->i_es_all_nr = 0;
1249
	ei->i_es_shk_nr = 0;
1250
	ei->i_es_shrink_lblk = 0;
1251 1252
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
1253
	ext4_init_pending_tree(&ei->i_pending_tree);
1254 1255
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
1257
#endif
1258
	ei->jinode = NULL;
1259
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
1260
	spin_lock_init(&ei->i_completed_io_lock);
1261 1262
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
1263
	atomic_set(&ei->i_unwritten, 0);
1264
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
1265 1266
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
1267 1268 1269
	return &ei->vfs_inode;
}

1270 1271 1272 1273
static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

1277 1278 1279 1280
	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_free_in_core_inode(struct inode *inode)
{
1283
	fscrypt_free_inode(inode);
1284 1285 1286 1287
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

1291
static void ext4_destroy_inode(struct inode *inode)
1292
{
1293
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
1294 1295 1296
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
1302 1303
}

1304
static void init_once(void *foo)
1305
{
1306
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
1307

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
1310
	init_rwsem(&ei->i_data_sem);
1311
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
1313
	ext4_fc_init_inode(&ei->vfs_inode);
1314 1315
}

1316
static int __init init_inodecache(void)
1317
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
1325
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
1337
	kmem_cache_destroy(ext4_inode_cachep);
1338 1339
}

void ext4_clear_inode(struct inode *inode)
1341
{
1342
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
1344
	clear_inode(inode);
1345
	ext4_discard_preallocations(inode, 0);
1346
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
1354
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
1356 1357
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1359
					u64 ino, u32 generation)
1360 1361 1362
{
	struct inode *inode;

1363
	/*
1364 1365 1366
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
1367
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
1368 1369 1370
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
1371 1372 1373
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
1379
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
1386
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
1390 1391
}

static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
1408 1409
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
1417 1418
		return jbd2_journal_try_to_free_buffers(journal, page);

1419 1420 1421
	return try_to_free_buffers(page);
}

1422
#ifdef CONFIG_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
1432
	handle_t *handle = fs_data;
1433
	int res, res2, credits, retries = 0;
1434

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;
1443

1444 1445 1446
	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
		return -EOPNOTSUPP;

1450 1451 1452 1453
	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */
1461

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
1467 1468 1469 1470
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
1471
			/*
1472 1473
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
1474
			 */
1475
			ext4_set_inode_flags(inode, false);
1476 1477 1478 1479
		}
		return res;
	}

1480 1481 1482
	res = dquot_initialize(inode);
	if (res)
		return res;
1483
retry:
1484 1485
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

1489
	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
1490 1491 1492
	if (IS_ERR(handle))
		return PTR_ERR(handle);

1493 1494 1495
	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
1496 1497
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
1498 1499 1500 1501
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
1502
		ext4_set_inode_flags(inode, false);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);
1508 1509 1510

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

1516
static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb)
1517
{
1518
	return EXT4_SB(sb)->s_dummy_enc_policy.policy;
1519 1520
}

static bool ext4_has_stable_inodes(struct super_block *sb)
{
	return ext4_has_feature_stable_inodes(sb);
}

static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
	*lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
}

1533
static const struct fscrypt_operations ext4_cryptops = {
1534
	.key_prefix		= "ext4:",
1535 1536
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
1537
	.get_dummy_policy	= ext4_get_dummy_policy,
1538
	.empty_dir		= ext4_empty_dir,
1539
	.max_namelen		= EXT4_NAME_LEN,
1540 1541
	.has_stable_inodes	= ext4_has_stable_inodes,
	.get_ino_and_lblk_bits	= ext4_get_ino_and_lblk_bits,
1542 1543 1544
};
#endif

1545
#ifdef CONFIG_QUOTA
1546
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
1548

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
1554
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
1556 1557
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1558
			       size_t len, loff_t off);
1559
static ssize_t ext4_quota_write(struct super_block *sb, int type,
1560
				const char *data, size_t len, loff_t off);
1561 1562 1563
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
1564

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

1570
static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
1582 1583
};

1584
static const struct quotactl_ops ext4_qctl_operations = {
1585
	.quota_on	= ext4_quota_on,
1586
	.quota_off	= ext4_quota_off,
1587
	.quota_sync	= dquot_quota_sync,
1588
	.get_state	= dquot_get_state,
1589 1590
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
1591 1592
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

1596
static const struct super_operations ext4_sops = {
1597
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
1599 1600 1601
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
1602
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
1604 1605
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
1606 1607
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
1608 1609 1610
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
1611
#ifdef CONFIG_QUOTA
1612 1613
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
1615
#endif
1616
	.bdev_try_to_free_page = bdev_try_to_free_page,
1617 1618
};

1619
static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
1622
	.get_parent = ext4_get_parent,
1623
	.commit_metadata = ext4_nfs_commit_metadata,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
1629
	Opt_nouid32, Opt_debug, Opt_removed,
1630
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
1631
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
1632 1633
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
1634
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1635
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
1636
	Opt_inlinecrypt,
1637
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
1640 1641
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
1642 1643
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit,
1644
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
1645
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
1646
	Opt_inode_readahead_blks, Opt_journal_ioprio,
1647
	Opt_dioread_nolock, Opt_dioread_lock,
1648
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1649
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
1650
	Opt_prefetch_block_bitmaps,
1651
#ifdef CONFIG_EXT4_DEBUG
1652
	Opt_fc_debug_max_replay, Opt_fc_debug_force
1653
#endif
1654 1655
};

1656
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
1671 1672
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
1677
	{Opt_noload, "norecovery"},
1678
	{Opt_noload, "noload"},
1679 1680
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
1681
	{Opt_commit, "commit=%u"},
1682 1683
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
1684
	{Opt_journal_dev, "journal_dev=%u"},
1685
	{Opt_journal_path, "journal_path=%s"},
1686
	{Opt_journal_checksum, "journal_checksum"},
1687
	{Opt_nojournal_checksum, "nojournal_checksum"},
1688
	{Opt_journal_async_commit, "journal_async_commit"},
1689 1690 1691 1692
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
1693 1694
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
1695 1696 1697 1698 1699 1700
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
J
Jan Kara 已提交
1701
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
1702 1703 1704 1705
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
1706
	{Opt_prjquota, "prjquota"},
1707
	{Opt_barrier, "barrier=%u"},
T
Theodore Ts'o 已提交
1708 1709
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
1710
	{Opt_i_version, "i_version"},
R
Ross Zwisler 已提交
1711
	{Opt_dax, "dax"},
1712 1713 1714
	{Opt_dax_always, "dax=always"},
	{Opt_dax_inode, "dax=inode"},
	{Opt_dax_never, "dax=never"},
1715
	{Opt_stripe, "stripe=%u"},
1716
	{Opt_delalloc, "delalloc"},
1717 1718
	{Opt_warn_on_error, "warn_on_error"},
	{Opt_nowarn_on_error, "nowarn_on_error"},
1719 1720
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
1721
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
1722
	{Opt_nodelalloc, "nodelalloc"},
1723 1724
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
1725 1726
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
1727
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1728
	{Opt_journal_ioprio, "journal_ioprio=%u"},
1729
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
T
Theodore Ts'o 已提交
1730 1731
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
1732
	{Opt_dioread_nolock, "dioread_nolock"},
1733
	{Opt_dioread_lock, "nodioread_nolock"},
1734
	{Opt_dioread_lock, "dioread_lock"},
1735 1736
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
1737 1738 1739
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
1740
#ifdef CONFIG_EXT4_DEBUG
1741
	{Opt_fc_debug_force, "fc_debug_force"},
1742 1743
	{Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"},
#endif
1744
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
1745
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
1746
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
1747
	{Opt_inlinecrypt, "inlinecrypt"},
1748 1749
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
1750
	{Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"},
1751 1752 1753 1754 1755
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
J
Josef Bacik 已提交
1756
	{Opt_err, NULL},
1757 1758
};

static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
	int ret = -1;

	if (sb_any_quota_loaded(sb) && !old_qname) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (old_qname) {
		if (strcmp(old_qname, qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *old_qname = get_qf_name(sb, sbi, qtype);

	if (sb_any_quota_loaded(sb) && old_qname) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
	synchronize_rcu();
	kfree(old_qname);
	return 1;
}
#endif

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400
#define MOPT_SKIP	0x0800
#define	MOPT_2		0x1000
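/*
 * Illustrative sketch (not part of the original source): a simple on/off
 * mount option is normally described by one ext4_mount_opts entry pairing
 * its token with the EXT4_MOUNT_* bit it sets or clears, e.g. a
 * hypothetical
 *
 *	{Opt_foo, EXT4_MOUNT_FOO, MOPT_SET},
 *	{Opt_nofoo, EXT4_MOUNT_FOO, MOPT_CLEAR},
 *
 * where Opt_foo and EXT4_MOUNT_FOO are made-up names used only as an
 * example of how the flags above combine.
 */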

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP},
	{Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_inode, EXT4_MOUNT2_DAX_INODE,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_never, EXT4_MOUNT2_DAX_NEVER,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_STRING},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS,
	 MOPT_SET},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
	{Opt_fc_debug_max_replay, 0, MOPT_GTE0},
#endif
	{Opt_err, 0, 0}
};

#ifdef CONFIG_UNICODE
static const struct ext4_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} ext4_sb_encoding_map[] = {
	{EXT4_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int ext4_sb_read_encoding(const struct ext4_super_block *es,
				 const struct ext4_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(es->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
		if (magic == ext4_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(ext4_sb_encoding_map))
		return -EINVAL;

	*encoding = &ext4_sb_encoding_map[i];
	*flags = le16_to_cpu(es->s_encoding_flags);

	return 0;
}
#endif

static int ext4_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
#ifdef CONFIG_FS_ENCRYPTION
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !sbi->s_dummy_enc_policy.policy) {
		ext4_msg(sb, KERN_WARNING,
			 "Can't set test_dummy_encryption on remount");
		return -1;
	}
	err = fscrypt_set_test_dummy_encryption(sb, arg->from,
						&sbi->s_dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			ext4_msg(sb, KERN_WARNING,
				 "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			ext4_msg(sb, KERN_WARNING,
				 "Value of option \"%s\" is unrecognized", opt);
		else
			ext4_msg(sb, KERN_WARNING,
				 "Error processing option \"%s\" [%d]",
				 opt, err);
		return -1;
	}
	ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
#else
	ext4_msg(sb, KERN_WARNING,
		 "Test dummy encryption mount option ignored");
#endif
	return 1;
}

static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		return 1;
	case Opt_i_version:
		sb->s_flags |= SB_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= SB_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~SB_LAZYTIME;
		return 1;
	case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
		sb->s_flags |= SB_INLINECRYPT;
#else
		ext4_msg(sb, KERN_ERR, "inline encryption not supported");
#endif
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		else if (arg > INT_MAX / HZ) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid commit interval %d, "
				 "must be smaller than %d",
				 arg, INT_MAX / HZ);
			return -1;
		}
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		if ((arg & 1) ||
		    (arg < 4) ||
		    (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid want_extra_isize %d", arg);
			return -1;
		}
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
#ifdef CONFIG_EXT4_DEBUG
	} else if (token == Opt_fc_debug_max_replay) {
		sbi->s_fc_debug_max_replay = arg;
#endif
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
		return ext4_set_test_dummy_encryption(sb, opt, &args[0],
						      is_remount);
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax || token == Opt_dax_always ||
		   token == Opt_dax_inode || token == Opt_dax_never) {
#ifdef CONFIG_FS_DAX
		switch (token) {
		case Opt_dax:
		case Opt_dax_always:
			if (is_remount &&
			    (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
			     (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
			fail_dax_change_remount:
				ext4_msg(sb, KERN_ERR, "can't change "
					 "dax mount option while remounting");
				return -1;
			}
			if (is_remount &&
			    (test_opt(sb, DATA_FLAGS) ==
			     EXT4_MOUNT_JOURNAL_DATA)) {
				    ext4_msg(sb, KERN_ERR, "can't mount with "
					     "both data=journal and dax");
				    return -1;
			}
			ext4_msg(sb, KERN_WARNING,
				"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
			sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS;
			sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
			break;
		case Opt_dax_never:
			if (is_remount &&
			    (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
			     (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS)))
				goto fail_dax_change_remount;
			sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
			break;
		case Opt_dax_inode:
			if (is_remount &&
			    ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
			     (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
			     !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE)))
				goto fail_dax_change_remount;
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
			sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
			/* Strictly for printing options */
			sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE;
			break;
		}
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
		sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (m->flags & MOPT_2) {
			if (arg != 0)
				sbi->s_mount_opt2 |= m->mount_opt;
			else
				sbi->s_mount_opt2 &= ~m->mount_opt;
		} else {
			if (arg != 0)
				sbi->s_mount_opt |= m->mount_opt;
			else
				sbi->s_mount_opt &= ~m->mount_opt;
		}
	}
	return 1;
}

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info __maybe_unused *sbi = EXT4_SB(sb);
	char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
	grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
	if (usr_qf_name || grp_qf_name) {
		if (test_opt(sb, USRQUOTA) && usr_qf_name)
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && grp_qf_name)
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
		if (blocksize < PAGE_SIZE)
			ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
				 "experimental mount option 'dioread_nolock' "
				 "for blocksize < PAGE_SIZE");
	}
	return 1;
}
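/*
 * Example (illustrative only, not in the original source): a mount string
 * such as "errors=remount-ro,data=ordered,journal_ioprio=3" is split on
 * commas above, each token is matched against the tokens[] table, and
 * handle_mount_opt() applies it; journal_ioprio in particular accepts only
 * values 0-7 within the best-effort I/O priority class.
 */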

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *usr_qf_name, *grp_qf_name;

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	rcu_read_lock();
	usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
	grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
	if (usr_qf_name)
		seq_show_option(seq, "usrjquota", usr_qf_name);
	if (grp_qf_name)
		seq_show_option(seq, "grpjquota", grp_qf_name);
	rcu_read_unlock();
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP)
			continue;
		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	fscrypt_show_test_dummy_encryption(seq, sep, sb);

	if (sb->s_flags & SB_INLINECRYPT)
		SEQ_OPTS_PUTS("inlinecrypt");

	if (test_opt(sb, DAX_ALWAYS)) {
		if (IS_EXT2_SB(sb))
			SEQ_OPTS_PUTS("dax");
		else
			SEQ_OPTS_PUTS("dax=always");
	} else if (test_opt2(sb, DAX_NEVER)) {
		SEQ_OPTS_PUTS("dax=never");
	} else if (test_opt2(sb, DAX_INODE)) {
		SEQ_OPTS_PUTS("dax=inode");
	}
	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
		goto done;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	err = ext4_commit_super(sb);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return err;
}

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups **old_groups, **new_groups;
	int size, i, j;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	new_groups = kvzalloc(roundup_pow_of_two(size *
			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR,
			 "not enough memory for %d flex group pointers", size);
		return -ENOMEM;
	}
	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
		new_groups[i] = kvzalloc(roundup_pow_of_two(
					 sizeof(struct flex_groups)),
					 GFP_KERNEL);
		if (!new_groups[i]) {
			for (j = sbi->s_flex_groups_allocated; j < i; j++)
				kvfree(new_groups[j]);
			kvfree(new_groups);
			ext4_msg(sb, KERN_ERR,
				 "not enough memory for %d flex groups", size);
			return -ENOMEM;
		}
	}
	rcu_read_lock();
	old_groups = rcu_dereference(sbi->s_flex_groups);
	if (old_groups)
		memcpy(new_groups, old_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups *)));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
	sbi->s_flex_groups_allocated = size;
	if (old_groups)
		ext4_kvfree_array_rcu(old_groups);
	return 0;
}
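/*
 * Clarifying note (added, not in the original source): with the flex_bg
 * feature each "flex group" aggregates 2^s_log_groups_per_flex block
 * groups, and ext4_fill_flex_info() below seeds the per-flex-group
 * counters (free inodes, free clusters, used directories) from the
 * on-disk group descriptors.
 */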

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct flex_groups *fg;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &fg->free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
	}

	return 1;
failed:
	return 0;
}

static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}
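/*
 * Summary comment (added for clarity): with metadata_csum the descriptor
 * checksum above is the low 16 bits of a crc32c over the seed, the group
 * number and the descriptor; otherwise, with the older gdt_csum feature,
 * it falls back to a crc16 of the filesystem UUID, group number and
 * descriptor.
 */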

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
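/*
 * Additional note (added for clarity, based on ext4's orphan handling
 * elsewhere): on disk the list head lives in es->s_last_orphan and each
 * orphan inode records the inode number of the next entry, so the loop
 * below simply keeps consuming es->s_last_orphan until the chain is empty.
 */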
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old style */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor: we store extents in the form
 * of a starting block and a length, hence the resulting length of the
 * extent covering the maximum file size must fit into the on-disk format
 * containers as well. Given that the length is always one unit bigger than
 * the max unit (because we count 0 as well) we have to lower s_maxbytes by
 * one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));

	if (!has_huge_files) {
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
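/*
 * Worked example (illustrative arithmetic, not from the original source):
 * for 4 KiB blocks (blkbits == 12) with huge_file support, res becomes
 * ((2^32 - 1) << 12), i.e. just under 16 TiB; without huge_file, the
 * 32-bit, 512-byte-sector i_blocks limit caps the result near 2 TiB.
 */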
/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */

	if (!has_huge_files) {
		/*
		 * !has_huge_files implies that the inode i_block field
		 * represents total file blocks in 2^32 512-byte sectors ==
		 * size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
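/*
 * Worked example (illustrative arithmetic): with 4 KiB blocks (bits == 12)
 * the direct/indirect/double/triple sums come to roughly 2^30 blocks, so a
 * block-mapped file tops out at about 4 TiB, well under the i_blocks
 * derived upper_limit computed above.
 */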

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}
3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235
/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

#ifndef CONFIG_UNICODE
	if (ext4_has_feature_casefold(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with casefold feature cannot be "
			 "mounted without CONFIG_UNICODE");
		return 0;
	}
#endif

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	struct super_block *sb = elr->lr_super;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	ext4_group_t group = elr->lr_next_group;
	unsigned long timeout = 0;
	unsigned int prefetch_ios = 0;
	int ret = 0;

	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
		elr->lr_next_group = ext4_mb_prefetch(sb, group,
				EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
		if (prefetch_ios)
			ext4_mb_prefetch_fini(sb, elr->lr_next_group,
					      prefetch_ios);
		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
					    prefetch_ios);
		if (group >= elr->lr_next_group) {
			ret = 1;
			if (elr->lr_first_not_zeroed != ngroups &&
			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
				elr->lr_next_group = elr->lr_first_not_zeroed;
				elr->lr_mode = EXT4_LI_MODE_ITABLE;
				ret = 0;
			}
		}
		return ret;
	}

	for (; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		trace_ext4_lazy_itable_init(sb, group);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				EXT4_SB(elr->lr_super)->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
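		/*
		 * Scheduling sketch (illustrative numbers, not from the
		 * original source): if zeroing this group's inode table
		 * took ~10 ms and s_li_wait_mult is 10, the next group is
		 * scheduled roughly 100 ms from now, keeping lazy init to
		 * a small fraction of the disk's time.
		 */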
	}
	return ret;
}

/*
 * Remove lr_request from the list_request and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	if (!elr)
		return;

	list_del(&elr->lr_request);
	EXT4_SB(elr->lr_super)->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When walking through the list is complete, compute
 * the next waking time and put itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_first_not_zeroed = start;
	if (test_opt(sb, PREFETCH_BLOCK_BITMAPS))
		elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
	else {
		elr->lr_mode = EXT4_LI_MODE_ITABLE;
		elr->lr_next_group = start;
	}

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) &&
	    (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	     !test_opt(sb, INIT_INODE_TABLE)))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across with a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->s_journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
		/* j_inum for internal journal is non-zero */
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
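	/*
	 * For example (approximate numbers): a 1 TiB filesystem with 4 KiB
	 * clusters has about 268 million clusters, so 2% would be roughly
	 * 5.3 million; the min_t() above therefore caps the reservation at
	 * 4096 clusters (16 MiB with 4 KiB clusters).
	 */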

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh, **group_desc;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	struct flex_groups **flex_groups;
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sb_block;
	}
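	/*
	 * For instance, with the default sb_block of 1 and a 4 KiB device
	 * block size, logical_sb_block ends up 0 and offset 1024, i.e. the
	 * superblock is read from the second kilobyte of block 0.
	 */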

	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		ret = PTR_ERR(bh);
		bh = NULL;
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
4024
	 *       some ext4 macro-instructions depend on its value
4025
	 */
4026
	es = (struct ext4_super_block *) (bh->b_data + offset);
4027 4028
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
4029 4030
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
4032

4033
	/* Warn if metadata_csum and gdt_csum are both set. */
4034 4035
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
4036
		ext4_warning(sb, "metadata_csum and uninit_bg are "
4037 4038
			     "redundant flags; please run fsck.");

4039 4040 4041 4042 4043 4044 4045 4046
	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

4047
	/* Load the checksum driver */
4048 4049 4050 4051 4052 4053
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
		ret = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto failed_mount;
4054 4055
	}

4056 4057 4058 4059 4060
	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
4061
		ret = -EFSBADCRC;
4062 4063 4064 4065
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
4066
	if (ext4_has_feature_csum_seed(sb))
4067
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
4069 4070 4071
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

4072 4073
	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
4074
	set_opt(sb, INIT_INODE_TABLE);
4075
	if (def_mount_opts & EXT4_DEFM_DEBUG)
4076
		set_opt(sb, DEBUG);
4077
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
4078
		set_opt(sb, GRPID);
4079
	if (def_mount_opts & EXT4_DEFM_UID16)
4080
		set_opt(sb, NO_UID32);
4081 4082
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
4084
	set_opt(sb, POSIX_ACL);
4085
#endif
4086 4087
	if (ext4_has_feature_fast_commit(sb))
		set_opt2(sb, JOURNAL_FAST_COMMIT);
4088 4089 4090 4091
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

4092
	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
4093
		set_opt(sb, JOURNAL_DATA);
4094
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
4095
		set_opt(sb, ORDERED_DATA);
4096
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
4097
		set_opt(sb, WRITEBACK_DATA);
4098 4099

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
4100
		set_opt(sb, ERRORS_PANIC);
4101
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
4102
		set_opt(sb, ERRORS_CONT);
4103
	else
4104
		set_opt(sb, ERRORS_RO);
4105 4106
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
4107
	if (def_mount_opts & EXT4_DEFM_DISCARD)
4108
		set_opt(sb, DISCARD);
4109

4110 4111
	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
4112 4113 4114
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
4115

4116
	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
4117
		set_opt(sb, BARRIER);
4118

4119 4120 4121 4122
	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
4123
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
4124
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
4125
		set_opt(sb, DELALLOC);
4126

4127 4128 4129 4130 4131 4132
	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

4133 4134
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
4135
		ext4_msg(sb, KERN_ERR,
4136 4137
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
4138 4139
		goto failed_mount;
	}
4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto failed_mount;
	}

	blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	if (blocksize == PAGE_SIZE)
		set_opt(sb, DIOREAD_NOLOCK);
4152

4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169
	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
				 sbi->s_first_ino);
			goto failed_mount;
		}
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
4170
			ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215
			goto failed_mount;
		}
		/*
		 * i_atime_extra is the last extra field available for
		 * [acm]times in struct ext4_inode. Checking for that
		 * field should suffice to ensure we have extra space
		 * for all three.
		 */
		if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
			sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
			sb->s_time_gran = 1;
			sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
		} else {
			sb->s_time_gran = NSEC_PER_SEC;
			sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
		}
		sb->s_time_min = EXT4_TIMESTAMP_MIN;
	}
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
			EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			unsigned v, max = (sbi->s_inode_size -
					   EXT4_GOOD_OLD_INODE_SIZE);

			v = le16_to_cpu(es->s_want_extra_isize);
			if (v > max) {
				ext4_msg(sb, KERN_ERR,
					 "bad s_want_extra_isize: %d", v);
				goto failed_mount;
			}
			if (sbi->s_want_extra_isize < v)
				sbi->s_want_extra_isize = v;

			v = le16_to_cpu(es->s_min_extra_isize);
			if (v > max) {
				ext4_msg(sb, KERN_ERR,
					 "bad s_min_extra_isize: %d", v);
				goto failed_mount;
			}
			if (sbi->s_want_extra_isize < v)
				sbi->s_want_extra_isize = v;
		}
	}

4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228
	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
4229
	}
4230
	sbi->s_def_mount_opt = sbi->s_mount_opt;
4231
	if (!parse_options((char *) data, sb, &journal_devnum,
4232
			   &journal_ioprio, 0))
4233 4234
		goto failed_mount;

4235
#ifdef CONFIG_UNICODE
4236
	if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266
		const struct ext4_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "Can't mount with encoding and encryption");
			goto failed_mount;
		}

		if (ext4_sb_read_encoding(es, &encoding_info,
					  &encoding_flags)) {
			ext4_msg(sb, KERN_ERR,
				 "Encoding requested by superblock is unknown");
			goto failed_mount;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			ext4_msg(sb, KERN_ERR,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			goto failed_mount;
		}
		ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
			 "%s-%s with flags 0x%hx", encoding_info->name,
			 encoding_info->version?:"\b", encoding_flags);

4267 4268
		sb->s_encoding = encoding;
		sb->s_encoding_flags = encoding_flags;
4269 4270 4271
	}
#endif

4272
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
4273
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, O_DIRECT and fast_commit support!\n");
4274
		/* can't mount with both data=journal and dioread_nolock. */
4275
		clear_opt(sb, DIOREAD_NOLOCK);
4276
		clear_opt2(sb, JOURNAL_FAST_COMMIT);
4277 4278 4279 4280 4281
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
4282
		if (test_opt(sb, DAX_ALWAYS)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
4287 4288 4289 4290 4291
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
4292 4293
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
4294 4295
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
4296 4297
	}

4298 4299
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
4300

4301
	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
4302 4303 4304
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
4305 4306 4307
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");
4308

4309 4310
	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
4311
		if (ext4_has_feature_64bit(sb)) {
4312 4313 4314 4315
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
4326 4327
	}

4328 4329 4330 4331 4332
	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
4333 4334 4335 4336 4337 4338
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext[34] filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
4350 4351 4352 4353 4354 4355
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext4 filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
4356 4357 4358 4359 4360 4361
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

4362 4363 4364 4365 4366
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
4367
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
4368
		goto failed_mount;
4369

4370 4371 4372 4373 4374 4375 4376
	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

4377 4378 4379
	if (bdev_dax_supported(sb->s_bdev, blocksize))
		set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);

4380
	if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
4381 4382 4383
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
4384
			goto failed_mount;
4385
		}
4386
		if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
4387
			ext4_msg(sb, KERN_ERR,
4388 4389
				"DAX unsupported by block device.");
			goto failed_mount;
4390
		}
	}

4393
	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
4394 4395 4396 4397 4398
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

4399
	if (sb->s_blocksize != blocksize) {
4400 4401
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
4402
			ext4_msg(sb, KERN_ERR, "bad block size %d",
4403
					blocksize);
4404 4405 4406
			goto failed_mount;
		}

4407
		brelse(bh);
4408 4409
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
4410 4411
		bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
		if (IS_ERR(bh)) {
4412 4413
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
4414 4415
			ret = PTR_ERR(bh);
			bh = NULL;
4416 4417
			goto failed_mount;
		}
4418
		es = (struct ext4_super_block *)(bh->b_data + offset);
4419
		sbi->s_es = es;
4420
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
4421 4422
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
4423 4424 4425 4426
			goto failed_mount;
		}
	}

4427
	has_huge_files = ext4_has_feature_huge_file(sb);
4428 4429 4430
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
4431

4432
	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
4433
	if (ext4_has_feature_64bit(sb)) {
4434
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
4435
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
4437 4438
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
4439 4440 4441 4442 4443
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
4444

4445 4446
	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
4447

4448
	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
4449
	if (sbi->s_inodes_per_block == 0)
4450
		goto cantfind_ext4;
4451 4452 4453
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
4454
			 sbi->s_inodes_per_group);
4455 4456
		goto failed_mount;
	}
4457 4458
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
4459
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
4460 4461
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
4462 4463
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
4464

4465
	for (i = 0; i < 4; i++)
4466 4467
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
4468
	if (ext4_has_feature_dir_index(sb)) {
4469 4470 4471 4472
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
4473
#ifdef __CHAR_UNSIGNED__
4474
			if (!sb_rdonly(sb))
4475 4476 4477
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
4478
#else
4479
			if (!sb_rdonly(sb))
4480 4481
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
4482
#endif
4483
		}
4484
	}
4485

4486 4487
	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
4488
	if (ext4_has_feature_bigalloc(sb)) {
4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
4515 4516 4517 4518
			ext4_msg(sb, KERN_ERR,
				 "fragment/cluster size (%d) != "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
4519 4520 4521 4522 4523 4524 4525 4526 4527
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
4528
	}
4529 4530
	sbi->s_cluster_ratio = clustersize / blocksize;

4531 4532 4533 4534
	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

4535 4536 4537 4538
	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
4539
	err = generic_check_addressable(sb->s_blocksize_bits,
4540
					ext4_blocks_count(es));
4541
	if (err) {
4542
		ext4_msg(sb, KERN_ERR, "filesystem"
4543
			 " too large to mount safely on this system");
4544 4545 4546
		goto failed_mount;
	}

4547 4548
	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;
4549

4550 4551 4552
	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
4553 4554
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
4555 4556 4557 4558
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

4559 4560 4561 4562 4563
	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
4564
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4565 4566 4567
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
4568 4569
		goto failed_mount;
	}
4570 4571 4572 4573 4574 4575 4576
	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
	    (sbi->s_cluster_ratio == 1)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block is 0 with a 1k block and cluster size");
		goto failed_mount;
	}

	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
4581
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
4582
		ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
4583
		       "(block count %llu, first data block %u, "
4584
		       "blocks per group %lu)", blocks_count,
4585 4586 4587 4588 4589
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
4591 4592
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4593 4594 4595 4596 4597 4598 4599 4600
	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
			 le32_to_cpu(es->s_inodes_count),
			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
		ret = -EINVAL;
		goto failed_mount;
	}
4601 4602
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
4603
	if (ext4_has_feature_meta_bg(sb)) {
4604
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
4605 4606 4607 4608 4609 4610 4611
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
4612 4613 4614 4615
	rcu_assign_pointer(sbi->s_group_desc,
			   kvmalloc_array(db_count,
					  sizeof(struct buffer_head *),
					  GFP_KERNEL));
4616
	if (sbi->s_group_desc == NULL) {
4617
		ext4_msg(sb, KERN_ERR, "not enough memory");
4618
		ret = -ENOMEM;
4619 4620 4621
		goto failed_mount;
	}

4622
	bgl_lock_init(sbi->s_blockgroup_lock);
4623

4624 4625 4626
	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
4627
		ext4_sb_breadahead_unmovable(sb, block);
4628 4629
	}

4630
	for (i = 0; i < db_count; i++) {
4631 4632
		struct buffer_head *bh;

4633
		block = descriptor_loc(sb, logical_sb_block, i);
4634 4635
		bh = ext4_sb_bread_unmovable(sb, block);
		if (IS_ERR(bh)) {
4636 4637
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
4638
			db_count = i;
4639 4640
			ret = PTR_ERR(bh);
			bh = NULL;
4641 4642
			goto failed_mount2;
		}
4643 4644 4645
		rcu_read_lock();
		rcu_dereference(sbi->s_group_desc)[i] = bh;
		rcu_read_unlock();
4646
	}
4647
	sbi->s_gdb_count = db_count;
4648
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
4649
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4650
		ret = -EFSCORRUPTED;
4651
		goto failed_mount2;
4652
	}
4653

4654
	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
4655 4656
	spin_lock_init(&sbi->s_error_lock);
	INIT_WORK(&sbi->s_error_work, flush_stashed_error_work);
4657

4658
	/* Register extent status tree shrinker */
4659
	if (ext4_es_register_shrinker(sbi))
4660 4661
		goto failed_mount3;

4662
	sbi->s_stripe = ext4_get_stripe_size(sbi);
4663
	sbi->s_extent_max_zeroout_kb = 32;
4664

4665 4666 4667
	/*
	 * set up enough so that it can read an inode
	 */
4668
	sb->s_op = &ext4_sops;
4669 4670
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
4671
#ifdef CONFIG_FS_ENCRYPTION
4672
	sb->s_cop = &ext4_cryptops;
4673
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &ext4_verityops;
#endif
4677
#ifdef CONFIG_QUOTA
4678
	sb->dq_op = &ext4_quota_operations;
4679
	if (ext4_has_feature_quota(sb))
4680
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
4681 4682
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4684
#endif
4685
	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
4686

4687
	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
4688
	mutex_init(&sbi->s_orphan_lock);
4689

4690 4691 4692 4693 4694 4695 4696 4697
	/* Initialize fast commit stuff */
	atomic_set(&sbi->s_fc_subtid, 0);
	atomic_set(&sbi->s_fc_ineligible_updates, 0);
	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
	sbi->s_fc_bytes = 0;
4698 4699
	ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
	ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
4700 4701
	spin_lock_init(&sbi->s_fc_lock);
	memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
4702 4703 4704 4705 4706 4707 4708
	sbi->s_fc_replay_state.fc_regions = NULL;
	sbi->s_fc_replay_state.fc_regions_size = 0;
	sbi->s_fc_replay_state.fc_regions_used = 0;
	sbi->s_fc_replay_state.fc_regions_valid = 0;
	sbi->s_fc_replay_state.fc_modified_inodes = NULL;
	sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
	sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
4709

4710 4711 4712
	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
4713
			  ext4_has_feature_journal_needs_recovery(sb));
4714

4715
	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
4716
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
4717
			goto failed_mount3a;
4718

4719 4720 4721 4722
	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
4723
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
4724 4725
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
4726
			goto failed_mount3a;
4727
	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
4728
		   ext4_has_feature_journal_needs_recovery(sb)) {
4729 4730
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
4731
		goto failed_mount_wq;
4732
	} else {
4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
4756
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
4757
		clear_opt(sb, JOURNAL_CHECKSUM);
4758
		clear_opt(sb, DATA_FLAGS);
4759
		clear_opt2(sb, JOURNAL_FAST_COMMIT);
4760 4761 4762
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
4763 4764
	}

4765
	if (ext4_has_feature_64bit(sb) &&
4766 4767
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
4768
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4769
		goto failed_mount_wq;
4770 4771
	}

4772 4773 4774 4775
	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
4776
	}
4777

4778 4779 4780 4781 4782 4783 4784 4785
	if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
		!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
					  JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
		ext4_msg(sb, KERN_ERR,
			"Failed to set fast commit journal feature");
		goto failed_mount_wq;
	}

4786 4787 4788 4789 4790
	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
4794
		if (jbd2_journal_check_available_features
4795
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4796
			set_opt(sb, ORDERED_DATA);
4797 4798
			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
		} else {
4799
			set_opt(sb, JOURNAL_DATA);
4800 4801
			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
		}
4802 4803
		break;

4804 4805
	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
4806 4807
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4808 4809
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
4810
			goto failed_mount_wq;
4811 4812 4813 4814
		}
	default:
		break;
	}
4815 4816 4817 4818 4819 4820 4821 4822

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			"journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

4823
	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
4824

4825
	sbi->s_journal->j_submit_inode_data_buffers =
4826
		ext4_journal_submit_inode_data_buffers;
4827
	sbi->s_journal->j_finish_inode_data_buffers =
4828
		ext4_journal_finish_inode_data_buffers;

4830
no_journal:
4831 4832 4833
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
4835
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}
4838 4839 4840 4841 4842 4843 4844 4845 4846

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
4847 4848
	}

	if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
		ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
		goto failed_mount_wq;
	}

4854
	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
4855 4856
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
4857
		ext4_commit_super(sb);
4858 4859
	}

4860 4861 4862 4863 4864 4865 4866
	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
4867 4868
		err = ext4_calculate_overhead(sb);
		if (err)
4869 4870 4871
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
4876 4877 4878 4879
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
4880
		ret = -ENOMEM;
4881 4882 4883
		goto failed_mount4;
	}

4884
	/*
4885
	 * The jbd2_journal_load will have done any necessary log recovery,
4886 4887 4888
	 * so we can safely mount the rest of the filesystem now.
	 */

4889
	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
4890
	if (IS_ERR(root)) {
4891
		ext4_msg(sb, KERN_ERR, "get root inode failed");
4892
		ret = PTR_ERR(root);
4893
		root = NULL;
4894 4895 4896
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
4897
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
4899 4900
		goto failed_mount4;
	}
4901 4902

#ifdef CONFIG_UNICODE
4903
	if (sb->s_encoding)
4904 4905 4906
		sb->s_d_op = &ext4_dentry_ops;
#endif

4907
	sb->s_root = d_make_root(root);
4908
	if (!sb->s_root) {
4909
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
4910 4911 4912
		ret = -ENOMEM;
		goto failed_mount4;
	}
4913

4914 4915
	ret = ext4_setup_super(sb, es, sb_rdonly(sb));
	if (ret == -EROFS) {
4916
		sb->s_flags |= SB_RDONLY;
4917 4918 4919
		ret = 0;
	} else if (ret)
		goto failed_mount4a;

4921
	ext4_set_resv_clusters(sb);

4923 4924 4925 4926 4927 4928 4929
	if (test_opt(sb, BLOCK_VALIDITY)) {
		err = ext4_setup_system_zone(sb);
		if (err) {
			ext4_msg(sb, KERN_ERR, "failed to initialize system "
				 "zone (%d)", err);
			goto failed_mount4a;
		}
4930
	}
4931
	ext4_fc_replay_cleanup(sb);
4932 4933 4934 4935 4936 4937

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
4938
		goto failed_mount5;
4939 4940
	}

4941 4942 4943 4944 4945 4946 4947 4948
	/*
	 * We can only set up the journal commit callback once
	 * mballoc is initialized
	 */
	if (sbi->s_journal)
		sbi->s_journal->j_commit_callback =
			ext4_journal_commit_callback;

4949 4950 4951
	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
4952 4953
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
4954 4955 4956
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
4957 4958
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
4959 4960 4961
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
4962
					  ext4_count_dirs(sb), GFP_KERNEL);
4963
	if (!err)
4964 4965
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
4966
	if (!err)
4967
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
4968

4969 4970 4971 4972 4973
	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

4974
	if (ext4_has_feature_flex_bg(sb))
4975 4976 4977 4978 4979 4980 4981
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

4982 4983
	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
4984
		goto failed_mount6;
4985

4986
	err = ext4_register_sysfs(sb);
4987 4988
	if (err)
		goto failed_mount7;

4990 4991
#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
4992
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
4993 4994 4995 4996 4997 4998
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

4999 5000 5001 5002 5003
	/*
	 * Save the original bdev mapping's wb_err value which could be
	 * used to detect the metadata async write error.
	 */
	spin_lock_init(&sbi->s_bdev_wb_lock);
5004 5005
	errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
				 &sbi->s_bdev_wb_err);
5006
	sb->s_bdev->bd_super = sb;
5007 5008 5009
	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
5010
	if (needs_recovery) {
5011
		ext4_msg(sb, KERN_INFO, "recovery complete");
5012 5013 5014
		err = ext4_mark_recovery_complete(sb, es);
		if (err)
			goto failed_mount8;
5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

5026 5027 5028 5029 5030 5031 5032 5033
	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

5034 5035
	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
5036 5037 5038
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
5039
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
5040

5041 5042
	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
5043

5044 5045 5046 5047
	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
5048 5049
	atomic_set(&sbi->s_warning_count, 0);
	atomic_set(&sbi->s_msg_count, 0);
5050

5051
	kfree(orig_data);
5052 5053
	return 0;

5054
cantfind_ext4:
5055
	if (!silent)
5056
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5057 5058
	goto failed_mount;

5059
failed_mount8:
5060
	ext4_unregister_sysfs(sb);
5061
	kobject_put(&sbi->s_kobj);
5062 5063 5064
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
5065
	ext4_mb_release(sb);
5066 5067 5068 5069 5070 5071 5072 5073
	rcu_read_lock();
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
5074 5075 5076 5077
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
5078
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
5079
failed_mount5:
5080 5081 5082
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
5084
	sb->s_root = NULL;
failed_mount4:
5086
	ext4_msg(sb, KERN_ERR, "mount failed");
5087 5088
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
5089
failed_mount_wq:
5090 5091 5092 5093 5094 5095
	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

5096 5097 5098 5099
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
5100
failed_mount3a:
5101
	ext4_es_unregister_shrinker(sbi);
5102
failed_mount3:
5103
	del_timer_sync(&sbi->s_err_report);
5104
	flush_work(&sbi->s_error_work);
5105 5106
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
5107
failed_mount2:
5108 5109
	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
5110
	for (i = 0; i < db_count; i++)
5111 5112 5113
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
5114
failed_mount:
5115 5116
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
5117 5118

#ifdef CONFIG_UNICODE
5119
	utf8_unload(sb->s_encoding);
5120 5121
#endif

5122
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
5124
		kfree(get_qf_name(sb, sbi, i));
5125
#endif
5126
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
5127
	ext4_blkdev_remove(sbi);
5128 5129 5130
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
5131
	kfree(sbi->s_blockgroup_lock);
5132
out_free_base:
5133
	kfree(sbi);
5134
	kfree(orig_data);
5135
	fs_put_dax(dax_dev);
5136
	return err ? err : ret;
5137 5138 5139 5140 5141 5142 5143
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;
	ext4_fc_init(sb, journal);

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

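/*
 * Look up the on-disk inode backing an internal journal and sanity-check it
 * before it is handed to jbd2.
 */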
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

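/*
 * Open an external journal device, validate its superblock and UUID against
 * this filesystem, and initialise a jbd2 journal on it.
 */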
static journal_t *ext4_get_dev_journal(struct super_block *sb,
5222 5223
				       dev_t j_dev)
{
5224
	struct buffer_head *bh;
5225
	journal_t *journal;
5226 5227
	ext4_fsblk_t start;
	ext4_fsblk_t len;
5228
	int hblock, blocksize;
5229
	ext4_fsblk_t sb_block;
5230
	unsigned long offset;
5231
	struct ext4_super_block *es;
5232 5233
	struct block_device *bdev;

5234 5235
	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;
5236

5237
	bdev = ext4_blkdev_get(j_dev, sb);
5238 5239 5240 5241
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
5242
	hblock = bdev_logical_block_size(bdev);
5243
	if (blocksize < hblock) {
5244 5245
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
5246 5247 5248
		goto out_bdev;
	}

5249 5250
	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
5251 5252
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
5253 5254
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
5255 5256 5257
		goto out_bdev;
	}

5258
	es = (struct ext4_super_block *) (bh->b_data + offset);
5259
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
5260
	    !(le32_to_cpu(es->s_feature_incompat) &
5261
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
5262 5263
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
5264 5265 5266 5267
		brelse(bh);
		goto out_bdev;
	}

5268 5269 5270 5271 5272 5273 5274 5275 5276
	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

5277
	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
5278
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
5279 5280 5281 5282
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
5284 5285 5286
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

5287
	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
5288 5289
					start, len, blocksize);
	if (!journal) {
5290
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
5291 5292 5293
		goto out_bdev;
	}
	journal->j_private = sb;
5294
	if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
5295
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
5296 5297 5298
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
5299 5300
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
5301 5302 5303
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
5304
	EXT4_SB(sb)->s_journal_bdev = bdev;
5305
	ext4_init_journal_params(sb, journal);
5306
	return journal;
5307

5308
out_journal:
5309
	jbd2_journal_destroy(journal);
5310
out_bdev:
5311
	ext4_blkdev_put(bdev);
5312 5313 5314
	return NULL;
}

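/*
 * Locate the journal (internal inode or external device), have jbd2 replay it
 * if recovery is needed, and attach it to this superblock.
 */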
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
5317 5318 5319 5320 5321 5322 5323
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;
5324
	int journal_dev_ro;
5325

5326 5327
	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return -EFSCORRUPTED;
5328

5329 5330
	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
5331 5332
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
5333 5334 5335 5336
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361
	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem has both journal inode and journal device!");
		return -EINVAL;
	}

	if (journal_inum) {
		journal = ext4_get_journal(sb, journal_inum);
		if (!journal)
			return -EINVAL;
	} else {
		journal = ext4_get_dev_journal(sb, journal_dev);
		if (!journal)
			return -EINVAL;
	}

	journal_dev_ro = bdev_read_only(journal->j_dev);
	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;

	if (journal_dev_ro && !sb_rdonly(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "journal device read-only, try mounting with '-o ro'");
		err = -EROFS;
		goto err_out;
	}
5362 5363 5364 5365 5366 5367

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
5368
	if (ext4_has_feature_journal_needs_recovery(sb)) {
5369
		if (sb_rdonly(sb)) {
5370 5371
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
5372
			if (really_read_only) {
5373
				ext4_msg(sb, KERN_ERR, "write access "
5374 5375
					"unavailable, cannot proceed "
					"(try mounting with noload)");
5376 5377
				err = -EROFS;
				goto err_out;
5378
			}
5379 5380
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
5381 5382 5383
		}
	}

5384
	if (!(journal->j_flags & JBD2_BARRIER))
5385
		ext4_msg(sb, KERN_INFO, "barriers disabled");
5386

5387
	if (!ext4_has_feature_journal_needs_recovery(sb))
5388
		err = jbd2_journal_wipe(journal, !really_read_only);
5389 5390 5391 5392 5393
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
5394
		err = jbd2_journal_load(journal);
5395 5396 5397 5398 5399
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}
5400 5401

	if (err) {
5402
		ext4_msg(sb, KERN_ERR, "error loading journal");
5403
		goto err_out;
5404 5405
	}

5406
	EXT4_SB(sb)->s_journal = journal;
5407 5408 5409 5410 5411 5412
	err = ext4_clear_journal_err(sb, es);
	if (err) {
		EXT4_SB(sb)->s_journal = NULL;
		jbd2_journal_destroy(journal);
		return err;
	}
5413

5414
	if (!really_read_only && journal_devnum &&
5415 5416 5417 5418
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
5419
		ext4_commit_super(sb);
5420 5421 5422
	}

	return 0;
5423 5424 5425 5426

err_out:
	jbd2_journal_destroy(journal);
	return err;
5427 5428
}

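/*
 * Copy the in-memory superblock state (timestamps, counters, error info) into
 * the on-disk superblock buffer and write it out synchronously.
 */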
static int ext4_commit_super(struct super_block *sb)
5430
{
5431
	struct ext4_sb_info *sbi = EXT4_SB(sb);
5432
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5433
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
5434
	int error = 0;
5435

5436
	if (!sbh || block_device_ejected(sb))
5437
		return error;
5438

5439 5440 5441 5442 5443 5444 5445 5446 5447 5448
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
5449
	if (!(sb->s_flags & SB_RDONLY))
5450
		ext4_update_tstamp(es, s_wtime);
5451 5452 5453
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
5454 5455
			    ((part_stat_read(sb->s_bdev->bd_part,
					     sectors[STAT_WRITE]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
5457 5458 5459
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
5460 5461
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
5462 5463
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
5464 5465 5466
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
5467
				&EXT4_SB(sb)->s_freeinodes_counter));
5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507
	/* Copy error information to the on-disk superblock */
	spin_lock(&sbi->s_error_lock);
	if (sbi->s_add_error_count > 0) {
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		if (!es->s_first_error_time && !es->s_first_error_time_hi) {
			__ext4_update_tstamp(&es->s_first_error_time,
					     &es->s_first_error_time_hi,
					     sbi->s_first_error_time);
			strncpy(es->s_first_error_func, sbi->s_first_error_func,
				sizeof(es->s_first_error_func));
			es->s_first_error_line =
				cpu_to_le32(sbi->s_first_error_line);
			es->s_first_error_ino =
				cpu_to_le32(sbi->s_first_error_ino);
			es->s_first_error_block =
				cpu_to_le64(sbi->s_first_error_block);
			es->s_first_error_errcode =
				ext4_errno_to_code(sbi->s_first_error_code);
		}
		__ext4_update_tstamp(&es->s_last_error_time,
				     &es->s_last_error_time_hi,
				     sbi->s_last_error_time);
		strncpy(es->s_last_error_func, sbi->s_last_error_func,
			sizeof(es->s_last_error_func));
		es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
		es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
		es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
		es->s_last_error_errcode =
				ext4_errno_to_code(sbi->s_last_error_code);
		/*
		 * Start the daily error reporting function if it hasn't been
		 * started already
		 */
		if (!es->s_error_count)
			mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
		le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
		sbi->s_add_error_count = 0;
	}
	spin_unlock(&sbi->s_error_lock);

5508
	BUFFER_TRACE(sbh, "marking dirty");
5509
	ext4_superblock_csum_set(sb);
5510
	lock_buffer(sbh);
5511
	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
5525
	mark_buffer_dirty(sbh);
5526 5527 5528 5529 5530 5531 5532 5533
	unlock_buffer(sbh);
	error = __sync_dirty_buffer(sbh,
		REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
	if (buffer_write_io_error(sbh)) {
		ext4_msg(sb, KERN_ERR, "I/O error while writing "
		       "superblock");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
5534
	}
5535
	return error;
5536 5537 5538 5539 5540 5541 5542
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es)
{
	int err;
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		if (journal != NULL) {
			ext4_error(sb, "Journal got removed while the fs was "
				   "mounted!");
			return -EFSCORRUPTED;
		}
		return 0;
	}
	jbd2_journal_lock_updates(journal);
	err = jbd2_journal_flush(journal);
	if (err < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb);
	}
out:
	jbd2_journal_unlock_updates(journal);
	return err;
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	if (!ext4_has_feature_journal(sb)) {
		ext4_error(sb, "Journal got removed while the fs was mounted!");
		return -EFSCORRUPTED;
	}

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
	return 0;
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

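/*
 * Flush pending reserved-cluster conversions and non-journalled quota, then
 * start (and optionally wait for) a journal commit, issuing a cache flush
 * only when the commit will not do it for us.
 */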
static int ext4_sync_fs(struct super_block *sb, int wait)
5630
{
5631
	int ret = 0;
5632
	tid_t target;
5633
	bool needs_barrier = false;
5634
	struct ext4_sb_info *sbi = EXT4_SB(sb);
5635

5636
	if (unlikely(ext4_forced_shutdown(sbi)))
5637 5638
		return 0;

5639
	trace_ext4_sync_fs(sb, wait);
5640
	flush_workqueue(sbi->rsv_conversion_wq);
5641 5642 5643 5644 5645
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
5646 5647 5648 5649 5650
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * being sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
5663 5664 5665
		needs_barrier = true;
	if (needs_barrier) {
		int err;
5666
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
5667 5668
		if (!ret)
			ret = err;
5669
	}
5670 5671 5672 5673

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function cannot bring the filesystem into a clean state
 * by itself.  It relies on upper layers to stop all data and metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
5683
{
5684 5685
	int error = 0;
	journal_t *journal;
5686

5687
	if (sb_rdonly(sb))
5688
		return 0;
5689

5690
	journal = EXT4_SB(sb)->s_journal;
5691

5692 5693 5694
	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);
5695

5696 5697 5698 5699 5700 5701 5702
		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;
5703 5704

		/* Journal blocked and flushed, clear needs_recovery flag. */
5705
		ext4_clear_feature_journal_needs_recovery(sb);
5706
	}
5707

5708
	error = ext4_commit_super(sb);
5709
out:
5710 5711 5712
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
5713
	return error;
5714 5715 5716 5717 5718 5719
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

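/*
 * Re-parse mount options and handle read-only <-> read-write transitions;
 * if anything fails part-way, the previously active options are restored.
 */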
static int ext4_remount(struct super_block *sb, int *flags, char *data)
5751
{
5752
	struct ext4_super_block *es;
5753
	struct ext4_sb_info *sbi = EXT4_SB(sb);
5754
	unsigned long old_sb_flags, vfs_flags;
5755
	struct ext4_mount_options old_opts;
5756
	int enable_quota = 0;
5757
	ext4_group_t g;
5758
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
5759
	int err = 0;
5760
#ifdef CONFIG_QUOTA
5761
	int i, j;
5762
	char *to_free[EXT4_MAXQUOTAS];
5763
#endif
5764
	char *orig_data = kstrdup(data, GFP_KERNEL);
5765

5766 5767 5768
	if (data && !orig_data)
		return -ENOMEM;

5769 5770 5771
	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
5772
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
5773 5774 5775
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
5776 5777
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
5778 5779
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
5781
		if (sbi->s_qf_names[i]) {
5782 5783 5784
			char *qf_name = get_qf_name(sb, sbi, i);

			old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
5785 5786 5787
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
5788
				kfree(orig_data);
5789 5790 5791 5792
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
5793
#endif
5794 5795
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
5796

5797 5798 5799 5800 5801 5802 5803 5804
	/*
	 * Some options can be enabled by ext4 and/or by VFS mount flag
	 * either way we need to make sure it matches in both *flags and
	 * s_flags. Copy those selected flags from *flags to s_flags
	 */
	vfs_flags = SB_LAZYTIME | SB_I_VERSION;
	sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);

5805
	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
5806 5807 5808 5809
		err = -EINVAL;
		goto restore_opts;
	}

5810
	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
5811 5812
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
5813 5814
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
5815 5816
	}

5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
5830 5831 5832 5833 5834 5835 5836
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				"journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

5839 5840 5841 5842 5843 5844
	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

5845
	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5846
		ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");
5847

5848 5849
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
5850 5851 5852

	es = sbi->s_es;

5853
	if (sbi->s_journal) {
5854
		ext4_init_journal_params(sb, sbi->s_journal);
5855 5856
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}
5857

5858 5859 5860
	/* Flush outstanding errors before changing fs state */
	flush_work(&sbi->s_error_work);

5861
	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
5862
		if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) {
5863 5864 5865 5866
			err = -EROFS;
			goto restore_opts;
		}

5867
		if (*flags & SB_RDONLY) {
5868 5869 5870
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
5871 5872
			err = dquot_suspend(sb, -1);
			if (err < 0)
5873 5874
				goto restore_opts;

5875 5876 5877 5878
			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
5879
			sb->s_flags |= SB_RDONLY;
5880 5881 5882 5883 5884 5885

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
5886 5887
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
5888 5889
				es->s_state = cpu_to_le16(sbi->s_mount_state);

5890 5891 5892 5893 5894
			if (sbi->s_journal) {
				/*
				 * We let remount-ro finish even if marking fs
				 * as clean failed...
				 */
5895
				ext4_mark_recovery_complete(sb, es);
5896
			}
5897 5898
			if (sbi->s_mmp_tsk)
				kthread_stop(sbi->s_mmp_tsk);
5899
		} else {
5900
			/* Make sure we can mount this feature set readwrite */
5901
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
5903 5904 5905
				err = -EROFS;
				goto restore_opts;
			}
5906 5907
			/*
			 * Make sure the group descriptor checksums
5908
			 * are sane.  If they aren't, refuse to remount r/w.
5909 5910 5911 5912 5913
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

5914
				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
5915 5916
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
5917
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
5918
					       le16_to_cpu(gdp->bg_checksum));
5919
					err = -EFSBADCRC;
5920 5921 5922 5923
					goto restore_opts;
				}
			}

5924 5925 5926 5927 5928 5929
			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
5930
				ext4_msg(sb, KERN_WARNING, "Couldn't "
5931 5932
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
5933
				       "umount/remount instead");
5934 5935 5936 5937
				err = -EINVAL;
				goto restore_opts;
			}

5938 5939 5940 5941 5942 5943
			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
5944 5945 5946 5947 5948
			if (sbi->s_journal) {
				err = ext4_clear_journal_err(sb, es);
				if (err)
					goto restore_opts;
			}
5949
			sbi->s_mount_state = le16_to_cpu(es->s_state);
5950 5951 5952 5953 5954 5955

			err = ext4_setup_super(sb, es, 0);
			if (err)
				goto restore_opts;

			sb->s_flags &= ~SB_RDONLY;
5956
			if (ext4_has_feature_mmp(sb))
5957 5958 5959 5960 5961
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
5962
			enable_quota = 1;
5963 5964
		}
	}
5965 5966 5967 5968 5969

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
5970
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
5971 5972 5973 5974 5975 5976 5977
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

5978 5979 5980 5981 5982
	/*
	 * Handle creation of system zone data early because it can fail.
	 * Releasing of existing data is done when we are sure remount will
	 * succeed.
	 */
5983
	if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
5984 5985 5986 5987
		err = ext4_setup_system_zone(sb);
		if (err)
			goto restore_opts;
	}
5988

5989
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
5990
		err = ext4_commit_super(sb);
5991 5992 5993
		if (err)
			goto restore_opts;
	}
5994

5995 5996
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
5998
		kfree(old_opts.s_qf_names[i]);
5999 6000 6001
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
6002
		else if (ext4_has_feature_quota(sb)) {
6003
			err = ext4_enable_quotas(sb);
6004
			if (err)
6005 6006 6007
				goto restore_opts;
		}
	}
6008
#endif
6009
	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6010
		ext4_release_system_zone(sb);
6011

6012 6013 6014 6015 6016 6017
	/*
	 * Some options can be enabled by ext4 and/or by VFS mount flag
	 * either way we need to make sure it matches in both *flags and
	 * s_flags. Copy those selected flags from s_flags to *flags
	 */
	*flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
6018 6019 6020

	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
6021
	return 0;
6022

6023 6024 6025
restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
6026
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
6027 6028 6029
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
6030 6031
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
6032
	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6033
		ext4_release_system_zone(sb);
6034 6035
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
6037 6038
		to_free[i] = get_qf_name(sb, sbi, i);
		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
6039
	}
6040 6041 6042
	synchronize_rcu();
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(to_free[i]);
6043
#endif
6044
	kfree(orig_data);
6045 6046 6047
	return err;
}

#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
6061
	spin_lock(&dquot->dq_dqb_lock);

6063 6064
	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
			     dquot->dq_dqb.dqb_bhardlimit);
6065 6066
	limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
6068 6069
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

6076 6077
	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
			     dquot->dq_dqb.dqb_ihardlimit);
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

6085
	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

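/*
 * Report capacity and free space for statfs(2), subtracting the metadata
 * overhead and reserved clusters and applying project quota limits where
 * they are enabled.
 */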
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
6092 6093
{
	struct super_block *sb = dentry->d_sb;
6094 6095
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
6098
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
6100

6101 6102
	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;
6103

6104
	buf->f_type = EXT4_SUPER_MAGIC;
6105
	buf->f_bsize = sb->s_blocksize;
6106
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
6107 6108
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
6109
	/* prevent underflow in case little free space is available */
6110
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
6114 6115
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
6117
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
6120
	buf->f_fsid = u64_to_fsid(fsid);
6121

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
6127 6128 6129 6130 6131 6132
	return 0;
}


#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

6160
static int ext4_acquire_dquot(struct dquot *dquot)
6161 6162 6163 6164
{
	int ret, err;
	handle_t *handle;

6165
	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
6166
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
6167 6168 6169
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
6170
	err = ext4_journal_stop(handle);
6171 6172 6173 6174 6175
	if (!ret)
		ret = err;
	return ret;
}

6176
static int ext4_release_dquot(struct dquot *dquot)
6177 6178 6179 6180
{
	int ret, err;
	handle_t *handle;

6181
	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
6182
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
6186
		return PTR_ERR(handle);
	}
6188
	ret = dquot_release(dquot);
6189
	err = ext4_journal_stop(handle);
6190 6191 6192 6193 6194
	if (!ret)
		ret = err;
	return ret;
}

6195
static int ext4_mark_dquot_dirty(struct dquot *dquot)
6196
{
6197 6198 6199
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

6200
	/* Are we journaling quotas? */
6201
	if (ext4_has_feature_quota(sb) ||
6202
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
6203
		dquot_mark_dquot_dirty(dquot);
6204
		return ext4_write_dquot(dquot);
6205 6206 6207 6208 6209
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

6210
static int ext4_write_info(struct super_block *sb, int type)
6211 6212 6213 6214 6215
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
6216
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
6217 6218 6219
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
6220
	err = ext4_journal_stop(handle);
6221 6222 6223 6224 6225 6226 6227 6228 6229
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
6230
static int ext4_quota_on_mount(struct super_block *sb, int type)
6231
{
6232
	return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
6233
					EXT4_SB(sb)->s_jquota_fmt, type);
6234 6235
}

6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

6250 6251 6252
/*
 * Standard function to be called on quota_on
 */
6253
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
6255 6256 6257 6258 6259
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;
6260

6261
	/* Quotafile not on the same filesystem? */
6262
	if (path->dentry->d_sb != sb)
6263
		return -EXDEV;
6264 6265 6266 6267 6268

	/* Quota already enabled for this file? */
	if (IS_NOQUOTA(d_inode(path->dentry)))
		return -EBUSY;

6269 6270
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
6271
		/* Quotafile not in fs root? */
6272
		if (path->dentry->d_parent != sb->s_root)
6273 6274 6275
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
6276 6277 6278 6279 6280 6281 6282
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
6283
	}
6284 6285 6286 6287 6288

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
6289
	if (EXT4_SB(sb)->s_journal &&
6290
	    ext4_should_journal_data(d_inode(path->dentry))) {
6291 6292 6293 6294 6295
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
6296
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
6297
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
6298
		if (err)
6299
			return err;
6300
	}
6301

6302 6303
	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
6304
	if (err) {
6305 6306
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
6307 6308 6309 6310
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

6311 6312 6313 6314 6315
		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
6316 6317 6318 6319 6320 6321 6322
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
6323
		err = ext4_mark_inode_dirty(handle, inode);
6324 6325 6326 6327
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
6328
	return err;
6329 6330
}

6331 6332 6333 6334 6335
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
6337
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
6340 6341
	};

6342
	BUG_ON(!ext4_has_feature_quota(sb));
6343 6344 6345 6346

	if (!qf_inums[type])
		return -EPERM;

6347
	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
6348 6349 6350 6351 6352
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
6355
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
6356
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
6357 6358
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
6359
	iput(qf_inode);
6360 6361 6362 6363 6364 6365 6366 6367

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
6369
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
6372
	};
6373 6374 6375 6376 6377
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};
6378

6379
	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
6381 6382
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
6383 6384
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
6385 6386
			if (err) {
				ext4_warning(sb,
6387 6388 6389
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
6390 6391 6392
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

6393 6394 6395 6396 6397 6398 6399
				return err;
			}
		}
	}
	return 0;
}

static int ext4_quota_off(struct super_block *sb, int type)
{
6402 6403
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
6404
	int err;
6405

6406 6407 6408
	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
6409 6410
		sync_filesystem(sb);

6411
	if (!inode || !igrab(inode))
6412 6413
		goto out;

6414
	err = dquot_quota_off(sb, type);
6415
	if (err || ext4_has_feature_quota(sb))
6416 6417 6418
		goto out_put;

	inode_lock(inode);
6419 6420 6421 6422 6423
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
6424
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
6425 6426
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
6427
		goto out_unlock;
6428
	}
6429 6430
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
6431
	inode->i_mtime = inode->i_ctime = current_time(inode);
6432
	err = ext4_mark_inode_dirty(handle, inode);
6433
	ext4_journal_stop(handle);
6434 6435 6436
out_unlock:
	inode_unlock(inode);
out_put:
6437
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
6438 6439
	iput(inode);
	return err;
6440
out:
6441 6442 6443
	return dquot_quota_off(sb, type);
}

6444 6445
/*
 * Read data from the quota file - avoid the page cache and such because we
 * cannot afford acquiring the locks.  As quota files are never truncated and
 * the quota code itself serializes the operations (and no one else should
 * touch the files), we don't have to be afraid of races.
 */
6448
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
6449 6450 6451
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
6467 6468 6469
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
6485
static ssize_t ext4_quota_write(struct super_block *sb, int type,
6486 6487 6488
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
6490
	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
6491
	int retries = 0;
6492 6493 6494
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

6495
	if (EXT4_SB(sb)->s_journal && !handle) {
6496 6497
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

6512 6513 6514 6515
	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
6516
	} while (PTR_ERR(bh) == -ENOSPC &&
6517
		 ext4_should_retry_alloc(inode->i_sb, &retries));
6518 6519
	if (IS_ERR(bh))
		return PTR_ERR(bh);
6520 6521
	if (!bh)
		goto out;
6522
	BUFFER_TRACE(bh, "get write access");
6523 6524 6525
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
6526
		return err;
6527
	}
6528 6529 6530 6531
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
6532
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
6533
	brelse(bh);
6534
out:
6535 6536
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
6537
		EXT4_I(inode)->i_disksize = inode->i_size;
6538 6539 6540
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err2 && !err))
			err = err2;
6541
	}
6542
	return err ? err : len;
6543 6544 6545
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

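/*
 * Module initialisation: set up shared caches and state, then register the
 * ext4 filesystem type (and the ext2/ext3 compatibility entries).
 */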
static int __init ext4_init_fs(void)
6621
{
6622
	int i, err;
6623

6624
	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
6625 6626 6627
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

6628
	/* Build-time check for flags consistency */
6629
	ext4_check_flag_values();
6630

6631
	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
6632 6633
		init_waitqueue_head(&ext4__ioend_wq[i]);

6634
	err = ext4_init_es();
6635 6636
	if (err)
		return err;
6637

6638
	err = ext4_init_pending();
	if (err)
		goto out7;

	err = ext4_init_post_read_processing();
6643 6644 6645
	if (err)
		goto out6;

6646 6647
	err = ext4_init_pageio();
	if (err)
6648
		goto out5;
6649

6650
	err = ext4_init_system_zone();
6651
	if (err)
6652
		goto out4;
6653

6654
	err = ext4_init_sysfs();
	if (err)
6656
		goto out3;
6657

6658
	err = ext4_init_mballoc();
6659 6660
	if (err)
		goto out2;
6661 6662 6663
	err = init_inodecache();
	if (err)
		goto out1;
6664 6665 6666 6667 6668

	err = ext4_fc_init_dentry_cache();
	if (err)
		goto out05;

6669
	register_as_ext3();
6670
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
6672 6673
	if (err)
		goto out;
6674

6675 6676
	return 0;
out:
6677 6678
	unregister_as_ext2();
	unregister_as_ext3();
6679
out05:
6680 6681
	destroy_inodecache();
out1:
6682
	ext4_exit_mballoc();
6683
out2:
6684 6685
	ext4_exit_sysfs();
out3:
6686
	ext4_exit_system_zone();
6687
out4:
6688
	ext4_exit_pageio();
6689
out5:
	ext4_exit_post_read_processing();
6691
out6:
	ext4_exit_pending();
out7:
6694 6695
	ext4_exit_es();

6696 6697 6698
	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)