// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static int ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)


static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
				  bh_end_io_t *end_io)
{
	/*
	 * The buffer's verified bit is no longer valid after reading from
	 * disk again due to a write-out error, so clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ, op_flags, bh);
}

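/*
 * Submit a read for the locked buffer @bh unless it is already uptodate
 * and return without waiting; the completion handler (end_io or
 * end_buffer_read_sync) takes care of unlocking the buffer.
 */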
void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
			 bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io);
}

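/*
 * Synchronous counterpart of ext4_read_bh_nowait(): submit the read if
 * needed, wait for completion, and return 0 or -EIO depending on
 * whether the buffer ended up uptodate.
 */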
int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

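/*
 * Like ext4_read_bh(), but take the buffer lock here.  If somebody else
 * already holds it, a read is presumably in flight, so depending on
 * @wait we either wait for that read to finish or return immediately.
 */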
int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
{
	if (trylock_buffer(bh)) {
		if (wait)
			return ext4_read_bh(bh, op_flags, NULL);
		ext4_read_bh_nowait(bh, op_flags, NULL);
		return 0;
	}
	if (wait) {
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return 0;
		return -EIO;
	}
	return 0;
}

/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block, int op_flags,
					       gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				   int op_flags)
{
	return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
}

struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	return __ext4_sb_bread_gfp(sb, block, 0, 0);
}

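/*
 * Fire-and-forget readahead of a single metadata block; I/O errors are
 * deliberately ignored since this is only an optimization hint.
 */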
void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);

	if (likely(bh)) {
		ext4_read_bh_lock(bh, REQ_RAHEAD, false);
		brelse(bh);
	}
}

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

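/*
 * The superblock checksum is a crc32c over the superblock contents up
 * to, but not including, the s_checksum field itself at the end of the
 * on-disk structure.
 */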
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

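/*
 * Block group descriptor fields are split into _lo and _hi halves; the
 * _hi half is only meaningful when the descriptor is the larger size
 * used by 64-bit-capable filesystems (EXT4_MIN_DESC_SIZE_64BIT).
 */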
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

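/*
 * Superblock timestamps are stored as 40-bit values: a __le32 low word
 * plus an extra 8-bit high byte, which keeps them valid well beyond the
 * 32-bit limit of year 2038.
 */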
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
{
	time64_t now = ktime_get_real_seconds();

	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)

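/*
 * Record an error in the on-disk superblock: s_last_error_* always
 * reflects the most recent error, while s_first_error_* is written only
 * once and preserved afterwards.
 */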
static void __save_error_info(struct super_block *sb, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	int err;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	ext4_update_tstamp(es, s_last_error_time);
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	switch (error) {
	case EIO:
		err = EXT4_ERR_EIO;
		break;
	case ENOMEM:
		err = EXT4_ERR_ENOMEM;
		break;
	case EFSBADCRC:
		err = EXT4_ERR_EFSBADCRC;
		break;
	case 0:
	case EFSCORRUPTED:
		err = EXT4_ERR_EFSCORRUPTED;
		break;
	case ENOSPC:
		err = EXT4_ERR_ENOSPC;
		break;
	case ENOKEY:
		err = EXT4_ERR_ENOKEY;
		break;
	case EROFS:
		err = EXT4_ERR_EROFS;
		break;
	case EFBIG:
		err = EXT4_ERR_EFBIG;
		break;
	case EEXIST:
		err = EXT4_ERR_EEXIST;
		break;
	case ERANGE:
		err = EXT4_ERR_ERANGE;
		break;
	case EOVERFLOW:
		err = EXT4_ERR_EOVERFLOW;
		break;
	case EBUSY:
		err = EXT4_ERR_EBUSY;
		break;
	case ENOTDIR:
		err = EXT4_ERR_ENOTDIR;
		break;
	case ENOTEMPTY:
		err = EXT4_ERR_ENOTEMPTY;
		break;
	case ESHUTDOWN:
		err = EXT4_ERR_ESHUTDOWN;
		break;
	case EFAULT:
		err = EXT4_ERR_EFAULT;
		break;
	default:
		err = EXT4_ERR_UNKNOWN;
	}
	es->s_last_error_errcode = err;
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		es->s_first_error_time_hi = es->s_last_error_time_hi;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
		es->s_first_error_errcode = es->s_last_error_errcode;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	__save_error_info(sb, error, ino, block, func, line);
	if (!bdev_read_only(sb->s_bdev))
		ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

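/*
 * Invoked by jbd2 when a transaction finishes committing: process data
 * blocks freed by that transaction and run any commit callbacks queued
 * on the transaction's private list.
 */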
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages
 * and calls clear_page_dirty_for_io(), which we want,
 * to write protect the pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct page *page,
					      struct writeback_control *wbc,
					      void *data)
{
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = page_buffers(page);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			redirty_page_for_writepage(wbc, page);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	return AOP_WRITEPAGE_ACTIVATE;
}

static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}

static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = jbd2_journal_submit_inode_data_buffers(jinode);

	return ret;
}

static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (sb_rdonly(sb))
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_RO) || system_going_down()) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	} else if (test_opt(sb, ERRORS_PANIC)) {
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, error, 0, block, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, error, inode->i_ino, block,
			function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
			function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, -errno, 0, 0, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		  unsigned int line, int error, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, error, 0, 0, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);

		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down())
		panic("EXT4-fs panic from previous error\n");
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	atomic_inc(&EXT4_SB(sb)->s_msg_count);
	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

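/*
 * Variant of ext4_error() for errors detected while the group's bitmap
 * lock is held; in the non-ERRORS_CONT case the group is unlocked around
 * the synchronous superblock commit so that we do not sleep on I/O with
 * the bitlock held, and then relocked before returning to the caller.
 */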
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	__save_error_info(sb, EFSCORRUPTED, ino, block, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

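/*
 * Mark a group's block and/or inode bitmap as corrupt and, the first
 * time this happens for a given bitmap, subtract that group's free
 * counts from the global free-cluster/free-inode counters.
 */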
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				     ext4_group_t group,
				     unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR,
		 "failed to open journal device unknown-block(%u,%u) %ld",
		 MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->s_journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->s_journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which hold s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

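/*
 * Tear down the filesystem at umount: destroy the journal, write the
 * superblock out one final time, and release all in-memory state.
 */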
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head **group_desc;
	struct flex_groups **flex_groups;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	destroy_workqueue(sbi->rsv_conversion_wq);

	/*
	 * Unregister sysfs before destroying jbd2 journal.
	 * Since we could still access attr_journal_task attribute via sysfs
	 * path which could have sbi->s_journal->j_task as NULL
	 */
	ext4_unregister_sysfs(sb);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	}

	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb, 1);

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->s_journal_bdev);
		invalidate_bdev(sbi->s_journal_bdev);
		ext4_blkdev_remove(sbi);
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	atomic_set(&ei->i_prealloc_active, 0);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}

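/*
 * Decide whether to evict the inode from the inode cache when its last
 * reference is dropped; fscrypt can also request eviction (e.g. when an
 * encryption key is being removed).
 */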
static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
}

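/*
 * Slab constructor: called when a new ext4_inode_info object is first
 * allocated by the slab cache, to initialize the fields that must stay
 * valid across repeated alloc/free cycles of the object.
 */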
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode, 0);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page);

	return try_to_free_buffers(page);
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
		return -EOPNOTSUPP;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode, false);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
		ext4_set_inode_flags(inode, false);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb)
{
	return EXT4_SB(sb)->s_dummy_enc_policy.policy;
}

static bool ext4_has_stable_inodes(struct super_block *sb)
{
	return ext4_has_feature_stable_inodes(sb);
}

static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
	*lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
}

static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.get_dummy_policy	= ext4_get_dummy_policy,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= EXT4_NAME_LEN,
	.has_stable_inodes	= ext4_has_stable_inodes,
	.get_ino_and_lblk_bits	= ext4_get_ino_and_lblk_bits,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_prefetch_block_bitmaps,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_dax_always, "dax=always"},
	{Opt_dax_inode, "dax=inode"},
	{Opt_dax_never, "dax=never"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_warn_on_error, "warn_on_error"},
	{Opt_nowarn_on_error, "nowarn_on_error"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "nodioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, "fc_debug_force"},
	{Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"},
#endif
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"},
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};

static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}
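
/*
 * Usage sketch for get_sb_block(): the "sb=" value is taken in 1024-byte
 * units regardless of the filesystem block size.  Assuming a filesystem
 * with 4 KiB blocks whose first backup superblock sits at filesystem
 * block 32768 (the usual mke2fs layout), a damaged primary superblock
 * can typically be worked around with:
 *
 *	mount -o sb=131072 /dev/sdXN /mnt
 *
 * since 32768 blocks * 4 KiB = 131072 KiB units; the device and
 * mountpoint names are placeholders.
 */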

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
	int ret = -1;

	if (sb_any_quota_loaded(sb) && !old_qname) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (old_qname) {
		if (strcmp(old_qname, qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *old_qname = get_qf_name(sb, sbi, qtype);

	if (sb_any_quota_loaded(sb) && old_qname) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
	synchronize_rcu();
	kfree(old_qname);
	return 1;
}
#endif

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400
#define MOPT_SKIP	0x0800
#define	MOPT_2		0x1000
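
/*
 * How the MOPT_* bits compose, reading one ext4_mount_opts[] entry from
 * the table below as an example:
 *
 *	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
 *	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}
 *
 * means the option is rejected for the ext2/ext3 compatibility
 * personalities (MOPT_EXT4_ONLY), sets the EXT4_MOUNT_DELALLOC bit in
 * sbi->s_mount_opt (MOPT_SET), and additionally records that the user
 * asked for it explicitly (MOPT_EXPLICIT, handled via set_opt2() in
 * handle_mount_opt()).
 */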

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP},
	{Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_inode, EXT4_MOUNT2_DAX_INODE,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_never, EXT4_MOUNT2_DAX_NEVER,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_STRING},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS,
	 MOPT_SET},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
	{Opt_fc_debug_max_replay, 0, MOPT_GTE0},
#endif
	{Opt_err, 0, 0}
};

#ifdef CONFIG_UNICODE
static const struct ext4_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} ext4_sb_encoding_map[] = {
	{EXT4_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int ext4_sb_read_encoding(const struct ext4_super_block *es,
				 const struct ext4_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(es->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
		if (magic == ext4_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(ext4_sb_encoding_map))
		return -EINVAL;

	*encoding = &ext4_sb_encoding_map[i];
	*flags = le16_to_cpu(es->s_encoding_flags);

	return 0;
}
#endif

static int ext4_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
#ifdef CONFIG_FS_ENCRYPTION
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !sbi->s_dummy_enc_policy.policy) {
		ext4_msg(sb, KERN_WARNING,
			 "Can't set test_dummy_encryption on remount");
		return -1;
	}
	err = fscrypt_set_test_dummy_encryption(sb, arg->from,
						&sbi->s_dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			ext4_msg(sb, KERN_WARNING,
				 "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			ext4_msg(sb, KERN_WARNING,
				 "Value of option \"%s\" is unrecognized", opt);
		else
			ext4_msg(sb, KERN_WARNING,
				 "Error processing option \"%s\" [%d]",
				 opt, err);
		return -1;
	}
	ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
#else
	ext4_msg(sb, KERN_WARNING,
		 "Test dummy encryption mount option ignored");
#endif
	return 1;
}
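
/*
 * Rough usage sketch, assuming a kernel built with CONFIG_FS_ENCRYPTION:
 *
 *	mount -o test_dummy_encryption=v2 /dev/vdb /mnt
 *
 * applies an ephemeral test policy (the "v1"/"v2" argument selects the
 * fscrypt policy version) so that new files and directories are encrypted
 * with a dummy key; the device and mountpoint names are placeholders.
 */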

static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		return 1;
	case Opt_i_version:
		sb->s_flags |= SB_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= SB_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~SB_LAZYTIME;
		return 1;
	case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
		sb->s_flags |= SB_INLINECRYPT;
#else
		ext4_msg(sb, KERN_ERR, "inline encryption not supported");
#endif
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		else if (arg > INT_MAX / HZ) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid commit interval %d, "
				 "must be smaller than %d",
				 arg, INT_MAX / HZ);
			return -1;
		}
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		if ((arg & 1) ||
		    (arg < 4) ||
		    (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid want_extra_isize %d", arg);
			return -1;
		}
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
#ifdef CONFIG_EXT4_DEBUG
	} else if (token == Opt_fc_debug_max_replay) {
		sbi->s_fc_debug_max_replay = arg;
#endif
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
		return ext4_set_test_dummy_encryption(sb, opt, &args[0],
						      is_remount);
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax || token == Opt_dax_always ||
		   token == Opt_dax_inode || token == Opt_dax_never) {
#ifdef CONFIG_FS_DAX
		switch (token) {
		case Opt_dax:
		case Opt_dax_always:
			if (is_remount &&
			    (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
			     (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
			fail_dax_change_remount:
				ext4_msg(sb, KERN_ERR, "can't change "
					 "dax mount option while remounting");
				return -1;
			}
			if (is_remount &&
			    (test_opt(sb, DATA_FLAGS) ==
			     EXT4_MOUNT_JOURNAL_DATA)) {
				    ext4_msg(sb, KERN_ERR, "can't mount with "
					     "both data=journal and dax");
				    return -1;
			}
			ext4_msg(sb, KERN_WARNING,
				"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
			sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS;
			sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
			break;
		case Opt_dax_never:
			if (is_remount &&
			    (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
			     (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS)))
				goto fail_dax_change_remount;
			sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
			break;
		case Opt_dax_inode:
			if (is_remount &&
			    ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
			     (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
			     !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE)))
				goto fail_dax_change_remount;
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
			sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
			/* Strictly for printing options */
			sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE;
			break;
		}
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
		sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (m->flags & MOPT_2) {
			if (arg != 0)
				sbi->s_mount_opt2 |= m->mount_opt;
			else
				sbi->s_mount_opt2 &= ~m->mount_opt;
		} else {
			if (arg != 0)
				sbi->s_mount_opt |= m->mount_opt;
			else
				sbi->s_mount_opt &= ~m->mount_opt;
		}
	}
	return 1;
}

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info __maybe_unused *sbi = EXT4_SB(sb);
	char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
	grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
	if (usr_qf_name || grp_qf_name) {
		if (test_opt(sb, USRQUOTA) && usr_qf_name)
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && grp_qf_name)
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
		if (blocksize < PAGE_SIZE)
			ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
				 "experimental mount option 'dioread_nolock' "
				 "for blocksize < PAGE_SIZE");
	}
	return 1;
}
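
/*
 * parse_options() consumes comma-separated strings such as
 * "errors=remount-ro,data=ordered,commit=30": strsep() peels off one
 * option at a time, match_token() classifies it against tokens[] and
 * captures any %u/%s argument in args[], and handle_mount_opt() applies
 * it.  A minimal calling sketch (the option string must be writable,
 * since strsep() modifies it):
 *
 *	unsigned long journal_devnum = 0;
 *	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
 *	char opts[] = "errors=remount-ro,data=ordered,commit=30";
 *
 *	if (!parse_options(opts, sb, &journal_devnum, &journal_ioprio, 0))
 *		return -EINVAL;
 */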

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *usr_qf_name, *grp_qf_name;

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	rcu_read_lock();
	usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
	grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
	if (usr_qf_name)
		seq_show_option(seq, "usrjquota", usr_qf_name);
	if (grp_qf_name)
		seq_show_option(seq, "grpjquota", grp_qf_name);
	rcu_read_unlock();
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP)
			continue;
		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	fscrypt_show_test_dummy_encryption(seq, sep, sb);

	if (sb->s_flags & SB_INLINECRYPT)
		SEQ_OPTS_PUTS("inlinecrypt");

	if (test_opt(sb, DAX_ALWAYS)) {
		if (IS_EXT2_SB(sb))
			SEQ_OPTS_PUTS("dax");
		else
			SEQ_OPTS_PUTS("dax=always");
	} else if (test_opt2(sb, DAX_NEVER)) {
		SEQ_OPTS_PUTS("dax=never");
	} else if (test_opt2(sb, DAX_INODE)) {
		SEQ_OPTS_PUTS("dax=inode");
	}
	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
		goto done;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	err = ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return err;
}

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups **old_groups, **new_groups;
	int size, i, j;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	new_groups = kvzalloc(roundup_pow_of_two(size *
			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR,
			 "not enough memory for %d flex group pointers", size);
		return -ENOMEM;
	}
	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
		new_groups[i] = kvzalloc(roundup_pow_of_two(
					 sizeof(struct flex_groups)),
					 GFP_KERNEL);
		if (!new_groups[i]) {
			for (j = sbi->s_flex_groups_allocated; j < i; j++)
				kvfree(new_groups[j]);
			kvfree(new_groups);
			ext4_msg(sb, KERN_ERR,
				 "not enough memory for %d flex groups", size);
			return -ENOMEM;
		}
	}
	rcu_read_lock();
	old_groups = rcu_dereference(sbi->s_flex_groups);
	if (old_groups)
		memcpy(new_groups, old_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups *)));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
	sbi->s_flex_groups_allocated = size;
	if (old_groups)
		ext4_kvfree_array_rcu(old_groups);
	return 0;
}

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct flex_groups *fg;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &fg->free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
	}

	return 1;
failed:
	return 0;
}
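
/*
 * Worked example for the two functions above: with the common mke2fs
 * default of s_log_groups_per_flex = 4, a flex group spans 2^4 = 16 block
 * groups and ext4_flex_group(sbi, i) is simply i >> 4, so block groups
 * 0-15 feed the free-inode/free-cluster/used-dirs counters of flex group
 * 0, groups 16-31 those of flex group 1, and so on.
 */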

static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}
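
/*
 * Summary of the two checksum flavours handled above: with metadata_csum,
 * a crc32c seeded by sbi->s_csum_seed is chained over the group number and
 * the descriptor (with bg_checksum treated as zero) and truncated to its
 * low 16 bits; older gdt_csum/uninit_bg filesystems use a crc16 over the
 * filesystem UUID, the group number and the descriptor instead.  The two
 * features are mutually exclusive on a well-formed filesystem.
 */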

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
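
/*
 * On disk the chain has no list_head: the superblock's s_last_orphan field
 * holds the inode number of the first orphan, and each orphan stores its
 * successor's inode number in the slot that normally holds i_dtime,
 * roughly:
 *
 *	s_last_orphan -> inode 12 (i_dtime = 57) -> inode 57 (i_dtime = 0)
 *
 * which is why the loop below keeps re-reading es->s_last_orphan until it
 * drops to zero.
 */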
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old style quota files */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor: we store extents in the form
 * of starting block and length, hence the resulting length of the extent
 * covering the maximum file size must fit into on-disk format containers as
 * well. Given that length is always by 1 unit bigger than max unit (because
 * we count 0 as well) we have to lower the s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));

	if (!has_huge_files) {
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
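
/*
 * Worked example: with 4 KiB blocks (blkbits = 12) and huge_file enabled,
 * ext4_max_size() comes to ((2^32 - 1) << 12) bytes, i.e. 16 TiB minus one
 * 4 KiB block: the extent tree addresses 2^32 logical blocks, and one
 * block is given up so that ee_len can still describe an extent reaching
 * the last addressable block.
 */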

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */

	if (!has_huge_files) {
		/*
		 * !has_huge_files implies that the inode i_block field
		 * represents total file blocks in 2^32 512-byte sectors ==
		 * size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
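
/*
 * Worked example, again with 4 KiB blocks (bits = 12): the block-mapped
 * geometry allows 12 direct + 2^10 single- + 2^20 double- + 2^30
 * triple-indirect data blocks, roughly 4 TiB.  Without huge_file, the
 * 2^32-sector i_blocks limit caps the result at just under 2 TiB instead.
 */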

3250
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
3251
				   ext4_fsblk_t logical_sb_block, int nr)
3252
{
3253
	struct ext4_sb_info *sbi = EXT4_SB(sb);
3254
	ext4_group_t bg, first_meta_bg;
3255 3256 3257 3258
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

3259
	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
3260
		return logical_sb_block + nr + 1;
3261
	bg = sbi->s_desc_per_block * nr;
3262
	if (ext4_bg_has_super(sb, bg))
3263
		has_super = 1;
3264

3265 3266 3267 3268 3269 3270 3271
	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
3272
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
3273 3274
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}
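
/*
 * Example (illustrative): without meta_bg, group descriptor block nr
 * simply follows the (backup) superblock at logical_sb_block + nr + 1.
 * With meta_bg, descriptor block nr serves the run of
 * s_desc_per_block groups starting at bg = s_desc_per_block * nr and
 * is stored inside that group, after its backup superblock if any.
 */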

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If a stripe size was specified via mount option, use that value.
 * If the value specified at mount time is greater than the blocks
 * per group, fall back to the superblock's stripe width, and then
 * to its RAID stride, subject to the same limit.  The allocator
 * needs the stripe size to be less than the blocks per group, so
 * if nothing suitable remains, return 0.
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}
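
/*
 * Example (illustrative): with s_blocks_per_group == 32768, mounting
 * with -o stripe=256 yields 256 regardless of the RAID hints, while
 * stripe=65536 would be ignored and s_raid_stripe_width (then
 * s_raid_stride) consulted instead.
 */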

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

#ifndef CONFIG_UNICODE
	if (ext4_has_feature_casefold(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with casefold feature cannot be "
			 "mounted without CONFIG_UNICODE");
		return 0;
	}
#endif

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	struct super_block *sb = elr->lr_super;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	ext4_group_t group = elr->lr_next_group;
	unsigned long timeout = 0;
	unsigned int prefetch_ios = 0;
	int ret = 0;

	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
		elr->lr_next_group = ext4_mb_prefetch(sb, group,
				EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
		if (prefetch_ios)
			ext4_mb_prefetch_fini(sb, elr->lr_next_group,
					      prefetch_ios);
		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
					    prefetch_ios);
		if (group >= elr->lr_next_group) {
			ret = 1;
			if (elr->lr_first_not_zeroed != ngroups &&
			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
				elr->lr_next_group = elr->lr_first_not_zeroed;
				elr->lr_mode = EXT4_LI_MODE_ITABLE;
				ret = 0;
			}
		}
		return ret;
	}

	for (; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		trace_ext4_lazy_itable_init(sb, group);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				EXT4_SB(elr->lr_super)->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}
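
/*
 * Scheduling sketch (summarizing the code above, not normative): the
 * first ext4_init_inode_table() call is timed, and that elapsed time
 * multiplied by s_li_wait_mult becomes the fixed delay inserted
 * before each subsequent group, keeping lazy init in the background.
 */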

/*
 * Remove lr_request from the request list and free the
 * request structure. Should be called with li_list_mtx held.
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	if (!elr)
		return;

	list_del(&elr->lr_request);
	EXT4_SB(elr->lr_super)->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When walking through the list is complete, compute the
 * next wakeup time and put itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_first_not_zeroed = start;
	if (test_opt(sb, PREFETCH_BLOCK_BITMAPS))
		elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
	else {
		elr->lr_mode = EXT4_LI_MODE_ITABLE;
		elr->lr_next_group = start;
	}

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) &&
	    (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	     !test_opt(sb, INIT_INODE_TABLE)))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
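
/*
 * Resulting combinations (a sketch of the logic above): metadata_csum
 * filesystems use journal checksum v3 (incompat CSUM_V3), older ones
 * use v1 (compat CHECKSUM).  journal_async_commit additionally sets
 * ASYNC_COMMIT; plain journal_checksum clears it; with neither mount
 * option, only ASYNC_COMMIT needs clearing, since both checksum
 * flavours were already cleared above.
 */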

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block groups can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the overhead at mount
 * time for older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}
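
/*
 * In the common non-bigalloc case the early return above reduces to a
 * fixed per-group figure: the backup superblock (0 or 1) plus the
 * group descriptor blocks, the inode table blocks, and 2 bitmap
 * blocks (block bitmap + inode bitmap).
 */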

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->s_journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
		/* j_inum for internal journal is non-zero */
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}
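
/*
 * Worked example (illustrative): a 4 TiB filesystem with 4 KiB
 * clusters has 2^30 clusters; 2% of that is ~21.5M clusters, so the
 * min_t() above clamps the reservation to 4096 clusters (16 MiB).
 */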

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh, **group_desc;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	struct flex_groups **flex_groups;
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sb_block;
	}

	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		ret = PTR_ERR(bh);
		bh = NULL;
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
		ret = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto failed_mount;
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	if (ext4_has_feature_fast_commit(sb))
		set_opt2(sb, JOURNAL_FAST_COMMIT);
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	if (blocksize == PAGE_SIZE)
		set_opt(sb, DIOREAD_NOLOCK);

	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
				 sbi->s_first_ino);
			goto failed_mount;
		}
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
			goto failed_mount;
		}
		/*
		 * i_atime_extra is the last extra field available for
		 * [acm]times in struct ext4_inode. Checking for that
		 * field should suffice to ensure we have extra space
		 * for all three.
		 */
		if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
			sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
			sb->s_time_gran = 1;
			sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
		} else {
			sb->s_time_gran = NSEC_PER_SEC;
			sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
		}
		sb->s_time_min = EXT4_TIMESTAMP_MIN;
	}
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
			EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			unsigned v, max = (sbi->s_inode_size -
					   EXT4_GOOD_OLD_INODE_SIZE);

			v = le16_to_cpu(es->s_want_extra_isize);
			if (v > max) {
				ext4_msg(sb, KERN_ERR,
					 "bad s_want_extra_isize: %d", v);
				goto failed_mount;
			}
			if (sbi->s_want_extra_isize < v)
				sbi->s_want_extra_isize = v;

			v = le16_to_cpu(es->s_min_extra_isize);
			if (v > max) {
				ext4_msg(sb, KERN_ERR,
					 "bad s_min_extra_isize: %d", v);
				goto failed_mount;
			}
			if (sbi->s_want_extra_isize < v)
				sbi->s_want_extra_isize = v;
		}
	}

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

#ifdef CONFIG_UNICODE
	if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
		const struct ext4_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "Can't mount with encoding and encryption");
			goto failed_mount;
		}

		if (ext4_sb_read_encoding(es, &encoding_info,
					  &encoding_flags)) {
			ext4_msg(sb, KERN_ERR,
				 "Encoding requested by superblock is unknown");
			goto failed_mount;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			ext4_msg(sb, KERN_ERR,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			goto failed_mount;
		}
		ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
			 "%s-%s with flags 0x%hx", encoding_info->name,
			 encoding_info->version?:"\b", encoding_flags);

		sb->s_encoding = encoding;
		sb->s_encoding_flags = encoding_flags;
	}
#endif

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, O_DIRECT and fast_commit support!\n");
		/* can't mount with both data=journal and dioread_nolock. */
		clear_opt(sb, DIOREAD_NOLOCK);
		clear_opt2(sb, JOURNAL_FAST_COMMIT);
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DAX_ALWAYS)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext[34] filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext4 filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
		goto failed_mount;

	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto failed_mount;
	}

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (bdev_dax_supported(sb->s_bdev, blocksize))
		set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
			goto failed_mount;
		}
		if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
			ext4_msg(sb, KERN_ERR,
				"DAX unsupported by block device.");
			goto failed_mount;
		}
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
		if (IS_ERR(bh)) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			ret = PTR_ERR(bh);
			bh = NULL;
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	if (ext4_has_feature_bigalloc(sb)) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "fragment/cluster size (%d) != "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;
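
	/*
	 * Example (illustrative): a bigalloc filesystem with 4 KiB
	 * blocks and 64 KiB clusters ends up with s_cluster_bits == 4
	 * and s_cluster_ratio == 16; without bigalloc the ratio is
	 * always 1.
	 */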

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
	    (sbi->s_cluster_ratio == 1)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block is 0 with a 1k block and cluster size");
		goto failed_mount;
	}

	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
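	/*
	 * i.e. groups = ceil((total blocks - first data block) /
	 * blocks-per-group).  Example (illustrative): a 1 TiB filesystem
	 * with 4 KiB blocks and 32768 blocks per group yields 8192
	 * block groups.
	 */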
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", blocks_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
			 le32_to_cpu(es->s_inodes_count),
			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
		ret = -EINVAL;
		goto failed_mount;
	}
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	rcu_assign_pointer(sbi->s_group_desc,
			   kvmalloc_array(db_count,
					  sizeof(struct buffer_head *),
					  GFP_KERNEL));
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		ext4_sb_breadahead_unmovable(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		struct buffer_head *bh;

		block = descriptor_loc(sb, logical_sb_block, i);
		bh = ext4_sb_bread_unmovable(sb, block);
		if (IS_ERR(bh)) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			ret = PTR_ERR(bh);
			bh = NULL;
			goto failed_mount2;
		}
		rcu_read_lock();
		rcu_dereference(sbi->s_group_desc)[i] = bh;
		rcu_read_unlock();
	}
	sbi->s_gdb_count = db_count;
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &ext4_verityops;
#endif
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	/* Initialize fast commit stuff */
	atomic_set(&sbi->s_fc_subtid, 0);
	atomic_set(&sbi->s_fc_ineligible_updates, 0);
	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
	sbi->s_fc_bytes = 0;
	ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
	ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
	spin_lock_init(&sbi->s_fc_lock);
	memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
	sbi->s_fc_replay_state.fc_regions = NULL;
	sbi->s_fc_replay_state.fc_regions_size = 0;
	sbi->s_fc_replay_state.fc_regions_used = 0;
	sbi->s_fc_replay_state.fc_regions_valid = 0;
	sbi->s_fc_replay_state.fc_modified_inodes = NULL;
	sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
	sbi->s_fc_replay_state.fc_modified_inodes_used = 0;

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		clear_opt2(sb, JOURNAL_FAST_COMMIT);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
		!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
					  JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
		ext4_msg(sb, KERN_ERR,
			"Failed to set fast commit journal feature");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			set_opt(sb, ORDERED_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
		} else {
			set_opt(sb, JOURNAL_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
		}
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			"journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
	sbi->s_journal->j_submit_inode_data_buffers =
		ext4_journal_submit_inode_data_buffers;
	sbi->s_journal->j_finish_inode_data_buffers =
		ext4_journal_finish_inode_data_buffers;
B
Bobi Jam 已提交
4894

4895
no_journal:
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
	}

	if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
		ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}

#ifdef CONFIG_UNICODE
	if (sb->s_encoding)
		sb->s_d_op = &ext4_dentry_ops;
#endif

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	ret = ext4_setup_super(sb, es, sb_rdonly(sb));
	if (ret == -EROFS) {
		sb->s_flags |= SB_RDONLY;
		ret = 0;
	} else if (ret)
		goto failed_mount4a;

	ext4_set_resv_clusters(sb);

	if (test_opt(sb, BLOCK_VALIDITY)) {
		err = ext4_setup_system_zone(sb);
		if (err) {
			ext4_msg(sb, KERN_ERR, "failed to initialize system "
				 "zone (%d)", err);
			goto failed_mount4a;
		}
	}
	ext4_fc_replay_cleanup(sb);

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	ext4_superblock_csum_set(sb);
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		ext4_superblock_csum_set(sb);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	/*
	 * Save the original bdev mapping's wb_err value which could be
	 * used to detect the metadata async write error.
	 */
	spin_lock_init(&sbi->s_bdev_wb_lock);
	errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
				 &sbi->s_bdev_wb_err);
	sb->s_bdev->bd_super = sb;
	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		err = ext4_mark_recovery_complete(sb, es);
		if (err)
			goto failed_mount8;
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
	atomic_set(&sbi->s_warning_count, 0);
	atomic_set(&sbi->s_msg_count, 0);

	kfree(orig_data);
	return 0;

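	/*
	 * Error paths: the failed_mount* labels below unwind in reverse
	 * order of the setup above, so each label releases only what had
	 * been set up before the failing step.
	 */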
cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

failed_mount8:
	ext4_unregister_sysfs(sb);
	kobject_put(&sbi->s_kobj);
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	rcu_read_lock();
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < db_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);

#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif

#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;
	ext4_fc_init(sb, journal);

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

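/*
 * Open the journal inode for an internal (inode-based) journal.  The
 * inode must exist on disk as a regular file with a non-zero link
 * count; iget()ing an unused inode would make the subsequent iput()
 * try to delete it.
 */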
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					     unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

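/*
 * Open a journal kept on an external block device.  Such a device
 * carries its own ext4 superblock (with the JOURNAL_DEV incompat
 * feature) at the usual 1024-byte offset, and its UUID has to match
 * s_journal_uuid recorded in the filesystem's superblock.
 */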
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->s_journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

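/*
 * Locate, open and (via jbd2) recover the journal during mount.
 * Exactly one of the journal inode and an external journal device may
 * be in use; a read-only journal device is tolerated only when the
 * filesystem itself is being mounted read-only.
 */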
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;
	int journal_dev_ro;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return -EFSCORRUPTED;

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem has both journal inode and journal device!");
		return -EINVAL;
	}

	if (journal_inum) {
		journal = ext4_get_journal(sb, journal_inum);
		if (!journal)
			return -EINVAL;
	} else {
		journal = ext4_get_dev_journal(sb, journal_dev);
		if (!journal)
			return -EINVAL;
	}

	journal_dev_ro = bdev_read_only(journal->j_dev);
	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;

	if (journal_dev_ro && !sb_rdonly(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "journal device read-only, try mounting with '-o ro'");
		err = -EROFS;
		goto err_out;
	}

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				err = -EROFS;
				goto err_out;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		goto err_out;
	}

	EXT4_SB(sb)->s_journal = journal;
	err = ext4_clear_journal_err(sb, es);
	if (err) {
		EXT4_SB(sb)->s_journal = NULL;
		jbd2_journal_destroy(journal);
		return err;
	}

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;

err_out:
	jbd2_journal_destroy(journal);
	return err;
}

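/*
 * Write the primary superblock, refreshing the write time, the
 * kbytes-written estimate and the free block/inode counters first.
 * With @sync set the write is issued synchronously (REQ_FUA when
 * barriers are enabled) and the I/O result is checked.
 */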
static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;

	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		ext4_update_tstamp(es, s_wtime);
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part,
					     sectors[STAT_WRITE]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (buffer_write_io_error(sbh)) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es)
{
	int err;
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		if (journal != NULL) {
			ext4_error(sb, "Journal got removed while the fs was "
				   "mounted!");
			return -EFSCORRUPTED;
		}
		return 0;
	}
	jbd2_journal_lock_updates(journal);
	err = jbd2_journal_flush(journal);
	if (err < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}
out:
	jbd2_journal_unlock_updates(journal);
	return err;
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	if (!ext4_has_feature_journal(sb)) {
		ext4_error(sb, "Journal got removed while the fs was mounted!");
		return -EFSCORRUPTED;
	}

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
	return 0;
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

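/*
 * sync_fs: flush pending reserved-extent conversions and dirty
 * (non-journalled) quotas, then either wait for a journal commit or,
 * when no commit will emit a cache flush for us, issue one explicitly.
 */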
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so a barrier
	 * must be sent at the end of the function.  But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem to a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

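/*
 * Remount: the new options are parsed against saved copies of the old
 * ones so any failure can restore the previous state.  Transitions
 * between read-only and read-write additionally handle journal replay
 * markers, the orphan list, MMP protection and quota suspend/resume.
 */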
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags, vfs_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
	char *to_free[EXT4_MAXQUOTAS];
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	if (data && !orig_data)
		return -ENOMEM;

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			char *qf_name = get_qf_name(sb, sbi, i);

			old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	/*
	 * Some options can be enabled by ext4 and/or by VFS mount flag
	 * either way we need to make sure it matches in both *flags and
	 * s_flags. Copy those selected flags from *flags to s_flags
	 */
	vfs_flags = SB_LAZYTIME | SB_I_VERSION;
	sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				"journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
		ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal) {
				/*
				 * We let remount-ro finish even if marking fs
				 * as clean failed...
				 */
				ext4_mark_recovery_complete(sb, es);
			}
			if (sbi->s_mmp_tsk)
				kthread_stop(sbi->s_mmp_tsk);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal) {
				err = ext4_clear_journal_err(sb, es);
				if (err)
					goto restore_opts;
			}
			sbi->s_mount_state = le16_to_cpu(es->s_state);

			err = ext4_setup_super(sb, es, 0);
			if (err)
				goto restore_opts;

			sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	/*
	 * Handle creation of system zone data early because it can fail.
	 * Releasing of existing data is done when we are sure remount will
	 * succeed.
	 */
	if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
		err = ext4_setup_system_zone(sb);
		if (err)
			goto restore_opts;
	}

	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
		err = ext4_commit_super(sb, 1);
		if (err)
			goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
		ext4_release_system_zone(sb);

	/*
	 * Some options can be enabled by ext4 and/or by VFS mount flag
	 * either way we need to make sure it matches in both *flags and
	 * s_flags. Copy those selected flags from s_flags to *flags
	 */
	*flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);

	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
		ext4_release_system_zone(sb);
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		to_free[i] = get_qf_name(sb, sbi, i);
		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
	}
	synchronize_rcu();
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(to_free[i]);
#endif
	kfree(orig_data);
	return err;
}

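/*
 * For inodes with EXT4_INODE_PROJINHERIT set, statfs results are
 * clamped to the project quota limits below, so the quota limit and
 * usage are reported as the "filesystem" size and usage.
 */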
#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
			     dquot->dq_dqb.dqb_bhardlimit);
	limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
			     dquot->dq_dqb.dqb_ihardlimit);
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

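/*
 * statfs: report totals in f_bsize units with the overhead estimate
 * subtracted from the block count (unless minixdf), hiding both the
 * root-reserved blocks and the reserved cluster pool from f_bavail.
 */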
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow when little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid = u64_to_fsid(fsid);

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}


#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

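/*
 * With journalled quota (the quota feature, or journalled quota files
 * given at mount time) a dirtied dquot is written out immediately
 * under the running transaction; otherwise it is merely marked dirty.
 */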
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	/* Quota already enabled for this file? */
	if (IS_NOQUOTA(d_inode(path->dentry)))
		return -EBUSY;

	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		err = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}

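/*
 * Enable quota accounting on one of the hidden, feature-based quota
 * inodes (s_usr/grp/prj_quota_inum in the superblock).  The inode is
 * flagged S_NOQUOTA so quota usage is never charged to a quota file
 * itself.
 */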
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				return err;
			}
		}
	}
	return 0;
}

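/*
 * Quota-off: flush delalloc so the quota files are fully allocated,
 * turn accounting off, and for visible quota files (not the hidden
 * feature-quota inodes) drop the NOATIME/IMMUTABLE flags that
 * ext4_quota_on() had set.
 */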
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_unlock;
	}
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (PTR_ERR(bh) == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err2 && !err))
			err = err2;
	}
	return err ? err : len;
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

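/*
 * ext4 can also pose as the ext2 (when CONFIG_EXT4_USE_FOR_EXT2 is set
 * and the legacy driver is not built) and ext3 filesystem types; the
 * *_feature_set_ok() helpers below refuse mounts whose feature bits a
 * legacy personality would not understand.
 */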
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

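/*
 * Module init: shared caches and state come up first (extent status,
 * pending tree, post-read processing, pageio, system zone, sysfs,
 * mballoc, the inode and fast-commit dentry caches), then the ext2/
 * ext3 personalities and ext4 itself are registered; the out* labels
 * unwind in exact reverse order on failure.
 */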
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pending();
	if (err)
		goto out7;

	err = ext4_init_post_read_processing();
	if (err)
		goto out6;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;

	err = ext4_fc_init_dentry_cache();
	if (err)
		goto out05;

	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
out05:
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_post_read_processing();
out6:
	ext4_exit_pending();
out7:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)