// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

/*
 * This works like sb_bread() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
struct buffer_head *
ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	return ERR_PTR(-EIO);
}
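
/*
 * Example (a hypothetical caller, not part of this file) showing the
 * ERR_PTR convention above -- the return value is never NULL, so the
 * two failure modes stay distinguishable:
 *
 *	bh = ext4_sb_bread(sb, block, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);	// -ENOMEM or -EIO
 *	...
 *	brelse(bh);
 */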

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

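/*
 * The superblock checksum is an ext4_chksum() (crc32c, seeded with ~0)
 * over the superblock bytes preceding the s_checksum field itself, so
 * the stored checksum is excluded from its own computation.
 */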
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

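/*
 * Allocation helpers: try a physically contiguous kmalloc() first and
 * fall back to vmalloc() if that fails.  __GFP_NOWARN suppresses the
 * allocation-failure warning for the first attempt, since the fallback
 * makes that failure non-fatal.
 */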
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}

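/*
 * Block group descriptor accessors.  Each on-disk field is split into a
 * _lo half and a _hi half; the _hi half is only meaningful when the
 * descriptor is at least EXT4_MIN_DESC_SIZE_64BIT bytes (i.e. on
 * filesystems with the 64bit feature), so it is folded in conditionally.
 */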
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

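/*
 * Superblock timestamps are 40 bits wide: the low 32 bits live in a
 * __le32 field and the high 8 bits in a separate __u8 "_hi" field,
 * hence the clamp_val() to 2^40 - 1 seconds below.
 */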
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
{
	time64_t now = ktime_get_real_seconds();

	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)

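/*
 * Record an error in the on-disk superblock: update the "last error"
 * fields unconditionally, fill in the "first error" fields only once,
 * bump s_error_count, and arm the daily error-report timer on the first
 * error.  This __-prefixed variant leaves writing the superblock to the
 * caller; save_error_info() below commits it immediately.
 */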
static void __save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	ext4_update_tstamp(es, s_last_error_time);
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		es->s_first_error_time_hi = es->s_last_error_time_hi;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oopses until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

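/*
 * Called by jbd2 once a transaction has committed.  Drains the
 * transaction's private callback list (e.g. freed-extent processing
 * from mballoc), dropping s_md_lock around each callback invocation.
 */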
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (sb_rdonly(sb))
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

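/*
 * Callers normally reach this through the ext4_error() wrapper macro
 * (defined in ext4.h), which supplies __func__ and __LINE__ for the
 * function/line arguments used in the report and in save_error_info().
 */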
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				     ext4_group_t group,
				     unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_unregister_sysfs(sb);
	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb, 1);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
	kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_da_metadata_calc_last_lblock = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}

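/*
 * Slab constructor: runs once when an object's backing slab page is
 * first set up, not on every allocation, so only fields that must stay
 * valid across free/alloc cycles (locks, semaphores, list heads)
 * belong here.
 */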
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
		ext4_set_inode_flags(inode);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

static bool ext4_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.dummy_context		= ext4_dummy_context,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= EXT4_NAME_LEN,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_warn_on_error, "warn_on_error"},
	{Opt_nowarn_on_error, "nowarn_on_error"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};

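/*
 * Parse a leading "sb=N" from the mount data string, consuming it and
 * advancing *data past the option.  For example (hypothetical, for a
 * 4K-block filesystem using the first backup superblock):
 *
 *	mount -t ext4 -o sb=32768 /dev/sdb1 /mnt
 *
 * Returns 1 (the default superblock location) if no "sb=" is present
 * or the value is malformed.
 */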
static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
	int ret = -1;

	if (sb_any_quota_loaded(sb) && !old_qname) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (old_qname) {
		if (strcmp(old_qname, qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *old_qname = get_qf_name(sb, sbi, qtype);

	if (sb_any_quota_loaded(sb) && old_qname) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
	synchronize_rcu();
	kfree(old_qname);
	return 1;
}
#endif

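/*
 * MOPT_* flags describe how each entry in ext4_mount_opts below is
 * handled: SET/CLEAR toggle the named bit in s_mount_opt, CLEAR_ERR and
 * DATAJ mark the mutually exclusive errors= and data= option families,
 * GTE0 requires a non-negative integer argument, and NO_EXT2/NO_EXT3
 * reject the option when the filesystem is mounted as ext2/ext3.
 */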
#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400
1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_err, 0, 0}
};

static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= SB_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= SB_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~SB_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
	grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
	if (usr_qf_name || grp_qf_name) {
		if (test_opt(sb, USRQUOTA) && usr_qf_name)
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && grp_qf_name)
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	return 1;
}
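
/*
 * Example: given the option string
 * "data=ordered,journal_ioprio=3,usrjquota=aquota.user", strsep()
 * splits on commas, match_token() classifies each piece and fills
 * args[], and handle_mount_opt() applies it; parsing stops (and the
 * mount fails) at the first handler that returns < 0.
 */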

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *usr_qf_name, *grp_qf_name;

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	rcu_read_lock();
	usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
	grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
	if (usr_qf_name)
		seq_show_option(seq, "usrjquota", usr_qf_name);
	if (grp_qf_name)
		seq_show_option(seq, "grpjquota", grp_qf_name);
	rcu_read_unlock();
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");
	if (DUMMY_ENCRYPTION_ENABLED(sbi))
		SEQ_OPTS_PUTS("test_dummy_encryption");

	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	err = ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return err;
}

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}
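
/*
 * Example: with s_log_groups_per_flex == 4, ext4_flex_group() maps
 * block group g to flex group g >> 4, so the free-inode, free-cluster
 * and used-dir counts of 16 block groups are aggregated into a single
 * flex_groups slot.
 */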

static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}
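
/*
 * Typical call pattern (a sketch, not a rule enforced here): a writer
 * updates fields in *gdp under ext4_lock_group() and then calls
 * ext4_group_desc_csum_set() before dirtying the descriptor buffer;
 * ext4_check_descriptors() below shows the matching verify side at
 * mount time.
 */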

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old style */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor. We do store extents in the form
 * of starting block and length, hence the resulting length of the extent
 * covering maximum file size must fit into on-disk format containers as
 * well. Given that the length is always one unit bigger than the largest
 * block index it can cover (because we count block 0 as well), we have to
 * lower s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total blocks in
		 * 512-byte units; 32 == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
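
/*
 * Worked example: for blkbits == 12 (4 KiB blocks) with huge files
 * available, res = (2^32 - 1) << 12, i.e. 16 TiB minus one 4 KiB
 * block -- the familiar extent-file size limit on 4 KiB-block ext4.
 */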

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */

	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
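
/*
 * Worked example: for bits == 12, res = (12 + 2^10 + 2^20 + 2^30)
 * blocks << 12 = 4,402,345,721,856 bytes (~4 TiB), the classic limit
 * for indirect-mapped files on a 4 KiB-block filesystem.  Without
 * has_huge_files the 2^32-sector cap bites first and the result is
 * clamped to upper_limit (~2 TiB) instead.
 */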

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}
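
/*
 * Example: with meta_bg and 4 KiB blocks (s_desc_per_block == 128 for
 * 32-byte descriptors), descriptor block nr == 5 lives at the first
 * block of block group 640, plus one block if that group carries a
 * superblock backup (ext4_bg_has_super() decides).
 */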

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * The allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
	if (ext4_has_feature_project(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with project quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}

/*
 * Remove lr_request from the request list and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When walking through the list is complete, compute the
 * next waking time and put itself into sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

3259 3260
int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
3261 3262
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
3263
	struct ext4_li_request *elr = NULL;
3264
	ext4_group_t ngroups = sbi->s_groups_count;
3265
	int ret = 0;
3266

3267
	mutex_lock(&ext4_li_mtx);
3268 3269 3270 3271 3272 3273
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * Set elr to NULL here since it has been inserted into
	 * the request_list; its removal and freeing are handled
	 * by ext4_clear_request_list() from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
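	/*
	 * With the stale checksum feature bits cleared above, set the
	 * combination that matches the current mount options below.
	 */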
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock values
 * for older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

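	/*
	 * Walk every group: with bigalloc, another group's block bitmap,
	 * inode bitmap or inode table can fall inside the cluster range
	 * of the group being charged here.
	 */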
	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
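	/* Publish the computed overhead before lockless readers use it. */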
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
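	/*
	 * For example, a 1 TiB filesystem with 4k blocks has ~268M
	 * clusters; 2% of that far exceeds 4096, so the cap applies and
	 * 4096 clusters (16 MiB) are reserved.
	 */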

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
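		/*
		 * e.g. with a 4k block size and the default sb_block of 1,
		 * logical_sb_block ends up 0 and offset ends up 1024.
		 */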
	} else {
		logical_sb_block = sb_block;
	}

	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
		ret = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto failed_mount;
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing, be silent if this looks like
			 * it's actually an ext[34] filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing, be silent if this looks like
			 * it's actually an ext4 filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto failed_mount;
	}

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
		if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
			ext4_msg(sb, KERN_ERR,
				"DAX unsupported by block device. Turning off DAX.");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
				 sbi->s_first_ino);
			goto failed_mount;
		}
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	if (ext4_has_feature_dir_index(sb)) {
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "fragment/cluster size (%d) != "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;
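	/*
	 * e.g. a bigalloc filesystem with 4k blocks and 64k clusters ends
	 * up with s_cluster_bits == 4 and s_cluster_ratio == 16.
	 */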

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
	    (sbi->s_cluster_ratio == 1)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block is 0 with a 1k block and cluster size");
		goto failed_mount;
	}

	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
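	/* blocks_count now holds the number of block groups, rounded up. */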
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
			 le32_to_cpu(es->s_inodes_count),
			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
		ret = -EINVAL;
		goto failed_mount;
	}
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = kvmalloc_array(db_count,
					   sizeof(struct buffer_head *),
					   GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sb_breadahead(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	sbi->s_gdb_count = db_count;
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			set_opt(sb, ORDERED_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
		} else {
			set_opt(sb, JOURNAL_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
		}
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			"journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	ret = ext4_setup_super(sb, es, sb_rdonly(sb));
	if (ret == -EROFS) {
		sb->s_flags |= SB_RDONLY;
		ret = 0;
	} else if (ret)
		goto failed_mount4a;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
	    sbi->s_want_extra_isize == 0) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
	}

	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
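	/*
	 * The superblock's free-space counters can be stale after a crash;
	 * they are recomputed from the bitmaps here and mirrored into the
	 * percpu counters below.
	 */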
	ext4_superblock_csum_set(sb);
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		ext4_superblock_csum_set(sb);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					     unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
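	/* The journal device keeps its superblock at byte offset 1024 too. */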
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
4779 4780
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
4792
	if (ext4_has_feature_journal_needs_recovery(sb)) {
4793
		if (sb_rdonly(sb)) {
4794 4795
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
4796
			if (really_read_only) {
4797
				ext4_msg(sb, KERN_ERR, "write access "
4798 4799
					"unavailable, cannot proceed "
					"(try mounting with noload)");
4800 4801
				return -EROFS;
			}
4802 4803
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
4804 4805 4806 4807
		}
	}

	if (journal_inum && journal_dev) {
4808 4809
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
		       "and inode journals!");
4810 4811 4812 4813
		return -EINVAL;
	}

	if (journal_inum) {
4814
		if (!(journal = ext4_get_journal(sb, journal_inum)))
4815 4816
			return -EINVAL;
	} else {
4817
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
4818 4819 4820
			return -EINVAL;
	}

4821
	if (!(journal->j_flags & JBD2_BARRIER))
4822
		ext4_msg(sb, KERN_INFO, "barriers disabled");
4823

4824
	if (!ext4_has_feature_journal_needs_recovery(sb))
4825
		err = jbd2_journal_wipe(journal, !really_read_only);
4826 4827 4828 4829 4830
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
4831
		err = jbd2_journal_load(journal);
4832 4833 4834 4835 4836
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}
4837 4838

	if (err) {
4839
		ext4_msg(sb, KERN_ERR, "error loading journal");
4840
		jbd2_journal_destroy(journal);
4841 4842 4843
		return err;
	}

4844 4845
	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);
4846

4847
	if (!really_read_only && journal_devnum &&
4848 4849 4850 4851
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
4852
		ext4_commit_super(sb, 1);
4853 4854 4855 4856 4857
	}

	return 0;
}

4858
static int ext4_commit_super(struct super_block *sb, int sync)
4859
{
4860
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4861
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
4862
	int error = 0;
4863

4864
	if (!sbh || block_device_ejected(sb))
4865
		return error;
4866 4867 4868 4869 4870 4871 4872 4873

	/*
	 * The superblock bh should be mapped, but it might not be if the
	 * device was hot-removed. Not much we can do but fail the I/O.
	 */
	if (!buffer_mapped(sbh))
		return error;

4874 4875 4876 4877 4878 4879 4880 4881 4882 4883
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
4884
	if (!(sb->s_flags & SB_RDONLY))
4885
		ext4_update_tstamp(es, s_wtime);
4886 4887 4888
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
4889 4890
			    ((part_stat_read(sb->s_bdev->bd_part,
					     sectors[STAT_WRITE]) -
T
Theodore Ts'o 已提交
4891
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
4892 4893 4894
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
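		/*
		 * With barriers enabled, use a FUA write so the superblock
		 * reaches stable storage before we report success.
		 */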
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (buffer_write_io_error(sbh)) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
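	/* Drain pending unwritten-extent conversion work before syncing. */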
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * be sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem into a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
	char *to_free[EXT4_MAXQUOTAS];
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	if (data && !orig_data)
		return -ENOMEM;

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			char *qf_name = get_qf_name(sb, sbi, i);

			old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				"journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			"dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & SB_LAZYTIME)
		sb->s_flags |= SB_LAZYTIME;

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
			if (sbi->s_mmp_tsk)
				kthread_stop(sbi->s_mmp_tsk);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);

			err = ext4_setup_super(sb, es, 0);
			if (err)
				goto restore_opts;

			sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
		err = ext4_commit_super(sb, 1);
		if (err)
			goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
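	/*
	 * Readers access s_qf_names[] under RCU, so restore the old names
	 * with rcu_assign_pointer() and free the replacements only after a
	 * grace period.
	 */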
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		to_free[i] = get_qf_name(sb, sbi, i);
		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
	}
	synchronize_rcu();
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(to_free[i]);
#endif
	kfree(orig_data);
	return err;
}

#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);
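	/* Use the soft limit if one is set, else the hard limit, as the
	 * apparent size of this project's "filesystem". */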
	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
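	/* Fold the 128-bit filesystem UUID into the 64-bit f_fsid. */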
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}


#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}

static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

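	/* Quota inodes are reserved system inodes; EXT4_IGET_SPECIAL allows
	 * them to be loaded by inode number. */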
	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				return err;
			}
		}
	}
	return 0;
}

static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

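	/*
	 * Retry the allocation on ENOSPC: ext4_should_retry_alloc() forces a
	 * journal commit, which can free blocks held by the committing
	 * transaction.
	 */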
	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}

static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops	*ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
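	/* Error paths unwind the initialization sequence in reverse order. */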
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)