xfs_super.c 58.7 KB
Newer Older
D
Dave Chinner 已提交
1
// SPDX-License-Identifier: GPL-2.0
L
Linus Torvalds 已提交
2
/*
3
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4
 * All Rights Reserved.
L
Linus Torvalds 已提交
5
 */
C
Christoph Hellwig 已提交
6

L
Linus Torvalds 已提交
7
#include "xfs.h"
8
#include "xfs_shared.h"
9
#include "xfs_format.h"
10 11
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
L
Linus Torvalds 已提交
12 13 14
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
15
#include "xfs_btree.h"
L
Linus Torvalds 已提交
16
#include "xfs_bmap.h"
17
#include "xfs_alloc.h"
C
Christoph Hellwig 已提交
18
#include "xfs_fsops.h"
19
#include "xfs_trans.h"
L
Linus Torvalds 已提交
20
#include "xfs_buf_item.h"
21
#include "xfs_log.h"
22
#include "xfs_log_priv.h"
23
#include "xfs_dir2.h"
24 25 26
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
27
#include "xfs_icache.h"
C
Christoph Hellwig 已提交
28
#include "xfs_trace.h"
D
Dave Chinner 已提交
29
#include "xfs_icreate_item.h"
30 31
#include "xfs_filestream.h"
#include "xfs_quota.h"
32
#include "xfs_sysfs.h"
33
#include "xfs_ondisk.h"
34
#include "xfs_rmap_item.h"
35
#include "xfs_refcount_item.h"
36
#include "xfs_bmap_item.h"
37
#include "xfs_reflink.h"
38
#include "xfs_pwork.h"
39
#include "xfs_ag.h"
L
Linus Torvalds 已提交
40

41
#include <linux/magic.h>
I
Ian Kent 已提交
42 43
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
L
Linus Torvalds 已提交
44

45
static const struct super_operations xfs_super_operations;
46

D
Dave Chinner 已提交
47
static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
48 49 50
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif
L
Linus Torvalds 已提交
51

52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73
#ifdef CONFIG_HOTPLUG_CPU
/* Global list of all mounted XFS filesystems, protected by its own lock. */
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

/* Add @mp to the global mount list under the list lock. */
static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

/* Remove @mp from the global mount list under the list lock. */
static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

74 75 76 77 78 79 80 81 82 83 84 85 86
/* The three mutually exclusive dax= mount modes. */
enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

/*
 * Translate a dax mode into the corresponding feature-flag state on the
 * mount.  "inode" means neither override flag is set; the other two modes
 * set their own flag and clear the opposite one.
 */
static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	dax_mode)
{
	switch (dax_mode) {
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	}
}

/* Accepted values for the dax= mount option. */
static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

107 108 109 110
/*
 * Table driven mount option parser.
 *
 * Token values for every mount option XFS understands.  Order must match
 * nothing in particular, but each token is referenced by the parameter
 * table below.
 */
enum {
	/* log and realtime device options */
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	/* alignment and identity options */
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	/* group-id inheritance options */
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	/* allocation and recovery behaviour */
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	/* discard and dax behaviour */
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

122
static const struct fs_parameter_spec xfs_fs_parameters[] = {
I
Ian Kent 已提交
123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
163
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
I
Ian Kent 已提交
164
	{}
165 166
};

167
/* Pairs a feature flag with the option string shown for it in /proc mounts. */
struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

172 173 174 175
static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
176 177 178
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
179 180 181 182 183 184 185 186 187 188 189 190 191
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
192 193
		{ 0, NULL }
	};
194
	struct xfs_mount	*mp = XFS_M(root->d_sb);
195 196 197
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
198
		if (mp->m_features & xfs_infop->flag)
199 200
			seq_puts(m, xfs_infop->str);
	}
201

202
	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
203

204
	if (xfs_has_allocsize(mp))
205
		seq_printf(m, ",allocsize=%dk",
206
			   (1 << mp->m_allocsize_log) >> 10);
207 208

	if (mp->m_logbufs > 0)
209
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
210
	if (mp->m_logbsize > 0)
211
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
212 213

	if (mp->m_logname)
214
		seq_show_option(m, "logdev", mp->m_logname);
215
	if (mp->m_rtname)
216
		seq_show_option(m, "rtdev", mp->m_rtname);
217 218

	if (mp->m_dalign > 0)
219
		seq_printf(m, ",sunit=%d",
220 221
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
222
		seq_printf(m, ",swidth=%d",
223 224
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

225 226 227 228
	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");
229

230 231 232 233 234 235 236 237 238
	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");
239 240

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
241
		seq_puts(m, ",noquota");
242 243

	return 0;
244
}
245

246
/*
247 248
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
249
 * whether or not XFS_FEAT_SMALL_INUMS is set.
250 251
 *
 * Inode allocation patterns are altered only if inode32 is requested
252
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
253
 * If altered, XFS_OPSTATE_INODE32 is set as well.
254 255 256 257 258 259
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
260
 */
261
xfs_agnumber_t
262 263 264
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
265
{
266
	xfs_agnumber_t	index;
267
	xfs_agnumber_t	maxagi = 0;
268 269
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
E
Eric Sandeen 已提交
270 271
	xfs_agino_t	agino;
	xfs_ino_t	ino;
272

273 274 275
	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
276
	 */
D
Darrick J. Wong 已提交
277
	if (M_IGEO(mp)->maxicount) {
278
		uint64_t	icount;
279 280 281 282 283 284 285

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
286
		max_metadata = agcount;
287 288
	}

289
	/* Get the last possible inode in the filesystem */
290
	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
291 292 293 294
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
295
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
296 297
	 * the allocator to accommodate the request.
	 */
298
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
299
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
300
	else
301
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
E
Eric Sandeen 已提交
302

303
	for (index = 0; index < agcount; index++) {
304
		struct xfs_perag	*pag;
305

306
		ino = XFS_AGINO_TO_INO(mp, index, agino);
307 308 309

		pag = xfs_perag_get(mp, index);

310
		if (xfs_is_inode32(mp)) {
311 312 313 314 315 316 317 318 319 320 321 322 323 324 325
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}
326 327 328 329

		xfs_perag_put(pag);
	}

330
	return xfs_is_inode32(mp) ? maxagi : agcount;
331 332
}

H
Hannes Eder 已提交
333
STATIC int
L
Linus Torvalds 已提交
334 335 336 337 338 339 340
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

341 342
	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
L
Linus Torvalds 已提交
343 344
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
345
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
L
Linus Torvalds 已提交
346 347
	}

D
Dave Chinner 已提交
348
	return error;
L
Linus Torvalds 已提交
349 350
}

H
Hannes Eder 已提交
351
STATIC void
L
Linus Torvalds 已提交
352 353 354 355
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
356
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
L
Linus Torvalds 已提交
357 358
}

359 360 361 362
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
363 364
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

365
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
366
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
367 368
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

369
		xfs_free_buftarg(mp->m_logdev_targp);
370
		xfs_blkdev_put(logdev);
371
		fs_put_dax(dax_logdev);
372 373
	}
	if (mp->m_rtdev_targp) {
374
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
375 376
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

377
		xfs_free_buftarg(mp->m_rtdev_targp);
378
		xfs_blkdev_put(rtdev);
379
		fs_put_dax(dax_rtdev);
380
	}
381
	xfs_free_buftarg(mp->m_ddev_targp);
382
	fs_put_dax(dax_ddev);
383 384 385 386 387 388 389 390 391 392 393 394 395 396
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
397
	struct xfs_mount	*mp)
398 399
{
	struct block_device	*ddev = mp->m_super->s_bdev;
400 401
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
402 403 404 405 406 407
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
408 409
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
410 411
		if (error)
			goto out;
412
		dax_logdev = fs_dax_get_by_bdev(logdev);
413 414
	}

415 416
	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
417 418 419 420
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
421 422
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
D
Dave Chinner 已提交
423
			error = -EINVAL;
424 425
			goto out_close_rtdev;
		}
426
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
427 428 429 430 431
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
D
Dave Chinner 已提交
432
	error = -ENOMEM;
433
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
434 435 436 437
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
438
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
439 440 441 442 443
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
444
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
445 446 447 448 449 450 451 452 453 454
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
455
		xfs_free_buftarg(mp->m_rtdev_targp);
456
 out_free_ddev_targ:
457
	xfs_free_buftarg(mp->m_ddev_targp);
458
 out_close_rtdev:
459
	xfs_blkdev_put(rtdev);
460
	fs_put_dax(dax_rtdev);
461
 out_close_logdev:
462
	if (logdev && logdev != ddev) {
463
		xfs_blkdev_put(logdev);
464 465
		fs_put_dax(dax_logdev);
	}
466
 out:
467
	fs_put_dax(dax_ddev);
468 469 470
	return error;
}

471 472 473 474 475 476 477 478
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;
479

480
	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
481 482 483 484 485 486
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

487
		if (xfs_has_sector(mp))
488 489 490 491 492 493 494 495 496 497 498 499 500 501 502
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
503

504 505 506 507
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
508
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
509 510
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
511 512 513
	if (!mp->m_buf_workqueue)
		goto out;

514
	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
515 516
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
517
	if (!mp->m_unwritten_workqueue)
518
		goto out_destroy_buf;
519

D
Dave Chinner 已提交
520
	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
521 522
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
D
Dave Chinner 已提交
523
	if (!mp->m_reclaim_workqueue)
524
		goto out_destroy_unwritten;
D
Dave Chinner 已提交
525

526 527
	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
528
			0, mp->m_super->s_id);
529
	if (!mp->m_blockgc_wq)
530
		goto out_destroy_reclaim;
531

532 533 534 535 536 537
	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

538 539
	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
540
	if (!mp->m_sync_workqueue)
541
		goto out_destroy_inodegc;
542

543 544
	return 0;

545 546 547 548
out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
D
Dave Chinner 已提交
549 550
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
551 552
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
553 554
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
555 556 557 558 559 560 561 562
out:
	return -ENOMEM;
}

/* Destroy every workqueue created by xfs_init_mount_workqueues(). */
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

571 572 573 574 575 576 577 578 579 580 581 582 583 584
/*
 * Workqueue function backing xfs_flush_inodes(): sync all dirty inodes on
 * this mount's superblock.  The trylock skips the sync if the superblock
 * is being unmounted/locked elsewhere rather than blocking on s_umount.
 */
static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

D
Dave Chinner 已提交
585 586 587 588 589 590 591 592 593 594
/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	/* Kick a fresh flush and wait for it to finish. */
	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

606
/* Catch misguided souls that try to use this interface on XFS */
L
Linus Torvalds 已提交
607
STATIC struct inode *
608
xfs_fs_alloc_inode(
L
Linus Torvalds 已提交
609 610
	struct super_block	*sb)
{
611
	BUG();
612
	return NULL;
L
Linus Torvalds 已提交
613 614
}

615
/*
616
 * Now that the generic code is guaranteed not to be accessing
D
Dave Chinner 已提交
617
 * the linux inode, we can inactivate and reclaim the inode.
618
 */
L
Linus Torvalds 已提交
619
STATIC void
620
xfs_fs_destroy_inode(
C
Christoph Hellwig 已提交
621
	struct inode		*inode)
L
Linus Torvalds 已提交
622
{
C
Christoph Hellwig 已提交
623 624
	struct xfs_inode	*ip = XFS_I(inode);

C
Christoph Hellwig 已提交
625
	trace_xfs_destroy_inode(ip);
626

627
	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
D
Dave Chinner 已提交
628 629
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
630
	xfs_inode_mark_reclaimable(ip);
L
Linus Torvalds 已提交
631 632
}

633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654
/*
 * ->dirty_inode hook: when a lazytime inode's cached timestamp update is
 * finally being written back (I_DIRTY_SYNC on an I_DIRTY_TIME inode),
 * log the timestamps in a transaction so they reach the on-disk inode.
 * Returns silently on transaction allocation failure (best-effort).
 */
static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	/* Only lazytime mounts defer timestamp updates this way. */
	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

655 656 657 658
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
659
 * from the slab. This avoids the need to repeatedly initialise
660 661 662
 * fields in the xfs inode that left in the initialise state
 * when freeing the inode.
 */
663 664
STATIC void
xfs_fs_inode_init_once(
665 666 667 668 669
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));
670 671 672 673 674

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
675 676 677
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

D
Dave Chinner 已提交
678 679
	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
680 681 682 683
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

684 685 686 687 688 689 690 691 692 693 694 695 696
/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

697 698 699 700 701 702
	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
703
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
704 705 706
		return 0;
	}

707
	return generic_drop_inode(inode);
708 709
}

I
Ian Kent 已提交
710 711
static void
xfs_mount_free(
712 713 714 715
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
I
Ian Kent 已提交
716
	kmem_free(mp);
717 718
}

L
Linus Torvalds 已提交
719
STATIC int
C
Christoph Hellwig 已提交
720
xfs_fs_sync_fs(
L
Linus Torvalds 已提交
721 722 723
	struct super_block	*sb,
	int			wait)
{
724
	struct xfs_mount	*mp = XFS_M(sb);
L
Linus Torvalds 已提交
725

726 727
	trace_xfs_fs_sync_fs(mp, __return_address);

728
	/*
C
Christoph Hellwig 已提交
729
	 * Doing anything during the async pass would be counterproductive.
730
	 */
C
Christoph Hellwig 已提交
731
	if (!wait)
C
Christoph Hellwig 已提交
732 733
		return 0;

D
Dave Chinner 已提交
734
	xfs_log_force(mp, XFS_LOG_SYNC);
C
Christoph Hellwig 已提交
735
	if (laptop_mode) {
L
Linus Torvalds 已提交
736 737
		/*
		 * The disk must be active because we're syncing.
738
		 * We schedule log work now (now that the disk is
L
Linus Torvalds 已提交
739 740
		 * active) instead of later (when it might not be).
		 */
741
		flush_delayed_work(&mp->m_log->l_work);
L
Linus Torvalds 已提交
742 743
	}

744 745 746 747 748 749
	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
750 751
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
752 753 754 755 756 757
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
758
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
759
		xfs_inodegc_stop(mp);
760 761
		xfs_blockgc_stop(mp);
	}
762

C
Christoph Hellwig 已提交
763
	return 0;
L
Linus Torvalds 已提交
764 765 766
}

STATIC int
767
xfs_fs_statfs(
768
	struct dentry		*dentry,
L
Linus Torvalds 已提交
769 770
	struct kstatfs		*statp)
{
C
Christoph Hellwig 已提交
771 772
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
773
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
774 775 776 777
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
C
Christoph Hellwig 已提交
778
	xfs_extlen_t		lsize;
779
	int64_t			ffree;
C
Christoph Hellwig 已提交
780

781 782 783
	/* Wait for whatever inactivations are in progress. */
	xfs_inodegc_flush(mp);

784
	statp->f_type = XFS_SUPER_MAGIC;
C
Christoph Hellwig 已提交
785 786 787
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
788
	statp->f_fsid = u64_to_fsid(id);
C
Christoph Hellwig 已提交
789

790
	icount = percpu_counter_sum(&mp->m_icount);
791
	ifree = percpu_counter_sum(&mp->m_ifree);
792
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
C
Christoph Hellwig 已提交
793 794 795 796 797

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
798 799
	spin_unlock(&mp->m_sb_lock);

800 801
	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
802 803
	statp->f_bavail = statp->f_bfree;

804
	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
D
Dave Chinner 已提交
805
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
D
Darrick J. Wong 已提交
806
	if (M_IGEO(mp)->maxicount)
C
Christoph Hellwig 已提交
807 808
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
D
Darrick J. Wong 已提交
809
					M_IGEO(mp)->maxicount);
810

811 812 813 814 815
	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

816
	/* make sure statp->f_ffree does not underflow */
817
	ffree = statp->f_files - (icount - ifree);
818
	statp->f_ffree = max_t(int64_t, ffree, 0);
819

C
Christoph Hellwig 已提交
820

821
	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
822 823
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
C
Christoph Hellwig 已提交
824
		xfs_qm_statvfs(ip, statp);
825 826

	if (XFS_IS_REALTIME_MOUNT(mp) &&
827
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
828 829 830 831 832
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

C
Christoph Hellwig 已提交
833
	return 0;
L
Linus Torvalds 已提交
834 835
}

E
Eric Sandeen 已提交
836 837 838
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
839
	uint64_t resblks = 0;
E
Eric Sandeen 已提交
840 841 842 843 844 845 846 847

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
848
	uint64_t resblks;
E
Eric Sandeen 已提交
849 850 851 852 853 854 855 856 857 858

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

859 860 861 862 863 864 865 866 867 868 869
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
870 871
	unsigned int		flags;
	int			ret;
872

873 874 875 876 877 878
	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
879
	xfs_save_resvblks(mp);
880
	ret = xfs_log_quiesce(mp);
881
	memalloc_nofs_restore(flags);
882 883 884 885 886 887 888 889

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
890
	if (ret && !xfs_is_readonly(mp)) {
891
		xfs_blockgc_start(mp);
892
		xfs_inodegc_start(mp);
893
	}
894

895
	return ret;
896 897 898 899 900 901 902 903 904 905
}

/* Thaw the filesystem: restore reservations and restart background work. */
STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
930
	if (xfs_has_logv2(mp)) {
931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
952
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
953 954 955 956 957 958 959 960
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
961
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
962 963 964 965 966
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

967 968
	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
969
	    !xfs_has_pquotino(mp)) {
970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

/*
 * Allocate the per-cpu superblock counters.  On any failure the counters
 * already initialised are torn down again and -ENOMEM is returned.
 */
static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	if (percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL))
		return -ENOMEM;

	if (percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL))
		goto out_icount;

	if (percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL))
		goto out_ifree;

	if (percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL))
		goto out_fdblocks;

	return 0;

out_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
out_ifree:
	percpu_counter_destroy(&mp->m_ifree);
out_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

/*
 * Tear down the per-cpu superblock counters at unmount time.
 */
static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	/*
	 * Unless the filesystem was shut down (in which case writeback may
	 * have been abandoned), all delalloc blocks should have been
	 * converted or freed by now.
	 */
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060
/*
 * Allocate and initialise the per-cpu deferred inode inactivation
 * structures.  Returns -ENOMEM if the per-cpu allocation fails.
 */
static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct xfs_inodegc	*gc = per_cpu_ptr(mp->m_inodegc, cpu);

		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_WORK(&gc->work, xfs_inodegc_worker);
	}

	return 0;
}

/*
 * Release the per-cpu inodegc structures; safe to call even if
 * xfs_inodegc_init_percpu() never ran or failed.
 */
static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (mp->m_inodegc)
		free_percpu(mp->m_inodegc);
}

1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076
/*
 * Tear down the mount at umount time.  The teardown order here is the
 * exact inverse of the setup order in xfs_fs_fill_super().
 */
static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	/* remove from the cpu-dead handler's list before freeing inodegc */
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

/*
 * Tell the VFS superblock shrinker how many reclaimable cached inodes
 * this filesystem currently holds.
 */
static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
1102
{
1103 1104
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
1105

1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119
/*
 * VFS superblock operations for XFS.  Note that ->put_super and
 * ->nr_cached_objects must tolerate a NULL sb->s_fs_info, which occurs
 * when ->fill_super fails part way through a mount.
 */
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
1120

1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153
/*
 * Parse an integer mount option value with an optional binary size
 * suffix: K/k (<<10), M/m (<<20) or G/g (<<30), e.g. "32k" -> 32768.
 *
 * Returns 0 and stores the scaled value in *res on success; returns
 * -ENOMEM or -EINVAL and leaves *res untouched on failure.
 *
 * Fixes over the previous version: an empty input string used to index
 * value[-1] (out of bounds), and on a kstrtoint() failure *res was
 * assigned from an uninitialized local.
 */
static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	/* An empty string has neither a suffix nor digits to parse. */
	if (!*value) {
		kfree(value);
		return -EINVAL;
	}

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	else
		*res = _res << shift_left_factor;
	kfree(value);
	return ret;
}

1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
/*
 * Warn that a mount option is deprecated.  Suppress the warning on
 * reconfigure when the mount already has the flag at the requested value,
 * since mount(8) re-passes the full option set from mtab/fstab on remount
 * and would otherwise spam the log.
 */
static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

C
Christoph Hellwig 已提交
1172
/*
1173 1174 1175
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
C
Christoph Hellwig 已提交
1176
 */
1177
static int
1178
xfs_fs_parse_param(
1179 1180
	struct fs_context	*fc,
	struct fs_parameter	*param)
L
Linus Torvalds 已提交
1181
{
1182
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1183 1184 1185
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;
C
Christoph Hellwig 已提交
1186

1187
	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1188 1189
	if (opt < 0)
		return opt;
L
Linus Torvalds 已提交
1190

1191 1192
	switch (opt) {
	case Opt_logbufs:
1193
		parsing_mp->m_logbufs = result.uint_32;
1194 1195
		return 0;
	case Opt_logbsize:
1196
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1197 1198 1199
			return -EINVAL;
		return 0;
	case Opt_logdev:
1200 1201 1202
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
1203 1204 1205
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
1206 1207 1208
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
1209 1210 1211 1212 1213
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
1214
		parsing_mp->m_allocsize_log = ffs(size) - 1;
1215
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1216 1217 1218
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
1219
		parsing_mp->m_features |= XFS_FEAT_GRPID;
1220 1221 1222
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
1223
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1224 1225
		return 0;
	case Opt_wsync:
1226
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1227 1228
		return 0;
	case Opt_norecovery:
1229
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1230 1231
		return 0;
	case Opt_noalign:
1232
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1233 1234
		return 0;
	case Opt_swalloc:
1235
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1236 1237
		return 0;
	case Opt_sunit:
1238
		parsing_mp->m_dalign = result.uint_32;
1239 1240
		return 0;
	case Opt_swidth:
1241
		parsing_mp->m_swidth = result.uint_32;
1242 1243
		return 0;
	case Opt_inode32:
1244
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1245 1246
		return 0;
	case Opt_inode64:
1247
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1248 1249
		return 0;
	case Opt_nouuid:
1250
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1251 1252
		return 0;
	case Opt_largeio:
1253
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1254 1255
		return 0;
	case Opt_nolargeio:
1256
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1257 1258
		return 0;
	case Opt_filestreams:
1259
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1260 1261
		return 0;
	case Opt_noquota:
1262 1263
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1264 1265 1266 1267
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
1268
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1269 1270 1271
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
1272
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1273
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1274 1275 1276
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
1277
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1278 1279
		return 0;
	case Opt_pqnoenforce:
1280
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1281
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1282 1283 1284
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
1285
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1286 1287
		return 0;
	case Opt_gqnoenforce:
1288
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1289
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1290 1291
		return 0;
	case Opt_discard:
1292
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1293 1294
		return 0;
	case Opt_nodiscard:
1295
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1296 1297 1298
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
1299
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1300 1301
		return 0;
	case Opt_dax_enum:
1302
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1303 1304
		return 0;
#endif
1305 1306
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
1307 1308
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1309 1310
		return 0;
	case Opt_noikeep:
1311 1312
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1313 1314
		return 0;
	case Opt_attr2:
1315 1316
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1317 1318
		return 0;
	case Opt_noattr2:
1319 1320
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1321
		return 0;
1322
	default:
1323
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1324 1325
		return -EINVAL;
	}
E
Eric Sandeen 已提交
1326 1327 1328 1329

	return 0;
}

1330
static int
1331
xfs_fs_validate_params(
1332 1333
	struct xfs_mount	*mp)
{
1334
	/* No recovery flag requires a read-only mount */
1335
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1336
		xfs_warn(mp, "no-recovery mounts must be read-only.");
D
Dave Chinner 已提交
1337
		return -EINVAL;
1338 1339
	}

1340 1341 1342 1343 1344
	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1345 1346 1347 1348 1349
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}


1350
	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1351
		xfs_warn(mp,
1352 1353
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
1354 1355
	}

1356 1357
	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
D
Dave Chinner 已提交
1358
		return -EINVAL;
1359 1360
	}

1361 1362 1363 1364 1365
	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}
D
Dave Chinner 已提交
1366

1367 1368 1369 1370 1371 1372
	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}
D
Dave Chinner 已提交
1373

1374 1375 1376 1377 1378 1379 1380 1381
	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}
D
Dave Chinner 已提交
1382

1383 1384 1385 1386 1387 1388 1389 1390 1391 1392
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}
D
Dave Chinner 已提交
1393

1394
	if (xfs_has_allocsize(mp) &&
1395 1396 1397 1398 1399 1400
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}
1401

D
Dave Chinner 已提交
1402
	return 0;
1403
}
1404

I
Ian Kent 已提交
1405
static int
1406
xfs_fs_fill_super(
1407
	struct super_block	*sb,
I
Ian Kent 已提交
1408
	struct fs_context	*fc)
1409
{
I
Ian Kent 已提交
1410
	struct xfs_mount	*mp = sb->s_fs_info;
1411
	struct inode		*root;
1412
	int			flags = 0, error;
1413

I
Ian Kent 已提交
1414
	mp->m_super = sb;
L
Linus Torvalds 已提交
1415

1416
	error = xfs_fs_validate_params(mp);
1417
	if (error)
1418
		goto out_free_names;
L
Linus Torvalds 已提交
1419 1420

	sb_min_blocksize(sb, BBSIZE);
1421
	sb->s_xattr = xfs_xattr_handlers;
1422
	sb->s_export_op = &xfs_export_operations;
1423
#ifdef CONFIG_XFS_QUOTA
1424
	sb->s_qcop = &xfs_quotactl_operations;
J
Jan Kara 已提交
1425
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1426
#endif
1427
	sb->s_op = &xfs_super_operations;
L
Linus Torvalds 已提交
1428

D
Dave Chinner 已提交
1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439
	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumention to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

I
Ian Kent 已提交
1440
	if (fc->sb_flags & SB_SILENT)
1441 1442
		flags |= XFS_MFSI_QUIET;

1443
	error = xfs_open_devices(mp);
1444
	if (error)
1445
		goto out_free_names;
1446

D
Dave Chinner 已提交
1447
	error = xfs_init_mount_workqueues(mp);
1448 1449
	if (error)
		goto out_close_devices;
C
Christoph Hellwig 已提交
1450

D
Dave Chinner 已提交
1451
	error = xfs_init_percpu_counters(mp);
1452 1453 1454
	if (error)
		goto out_destroy_workqueues;

1455 1456 1457 1458
	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

1459 1460 1461 1462 1463 1464 1465
	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

1466 1467 1468
	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
1469
		error = -ENOMEM;
1470
		goto out_destroy_inodegc;
1471 1472
	}

1473 1474
	error = xfs_readsb(mp, flags);
	if (error)
1475
		goto out_free_stats;
1476 1477

	error = xfs_finish_flags(mp);
1478
	if (error)
1479
		goto out_free_sb;
1480

1481
	error = xfs_setup_devices(mp);
1482
	if (error)
1483
		goto out_free_sb;
1484

D
Darrick J. Wong 已提交
1485
	/* V4 support is undergoing deprecation. */
1486
	if (!xfs_has_crc(mp)) {
D
Darrick J. Wong 已提交
1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

1498 1499 1500 1501 1502 1503 1504
	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536
	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547
	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
1548
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1549 1550 1551 1552 1553 1554 1555 1556
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

1557 1558
	error = xfs_filestream_mount(mp);
	if (error)
1559
		goto out_free_sb;
1560

1561 1562 1563 1564
	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
1565
	sb->s_magic = XFS_SUPER_MAGIC;
C
Christoph Hellwig 已提交
1566 1567
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1568
	sb->s_maxbytes = MAX_LFS_FILESIZE;
1569
	sb->s_max_links = XFS_MAXLINK;
L
Linus Torvalds 已提交
1570
	sb->s_time_gran = 1;
1571
	if (xfs_has_bigtime(mp)) {
1572 1573 1574 1575 1576 1577
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
D
Darrick J. Wong 已提交
1578
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1579 1580
	sb->s_iflags |= SB_I_CGROUPWB;

L
Linus Torvalds 已提交
1581 1582
	set_posix_acl_flag(sb);

D
Dave Chinner 已提交
1583 1584
	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
M
Matthew Garrett 已提交
1585
		sb->s_flags |= SB_I_VERSION;
D
Dave Chinner 已提交
1586

1587
	if (xfs_has_dax_always(mp)) {
1588
		bool rtdev_is_dax = false, datadev_is_dax;
1589

D
Dave Chinner 已提交
1590
		xfs_warn(mp,
1591 1592
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

1593 1594
		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
1595
		if (mp->m_rtdev_targp)
1596 1597 1598
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
D
Dave Chinner 已提交
1599
			xfs_alert(mp,
1600
			"DAX unsupported by block device. Turning off DAX.");
1601
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
D
Dave Chinner 已提交
1602
		}
1603
		if (xfs_has_reflink(mp)) {
1604
			xfs_alert(mp,
1605
		"DAX and reflink cannot be used together!");
1606 1607 1608
			error = -EINVAL;
			goto out_filestream_unmount;
		}
D
Dave Chinner 已提交
1609 1610
	}

1611
	if (xfs_has_discard(mp)) {
1612 1613 1614 1615 1616
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
1617
			mp->m_features &= ~XFS_FEAT_DISCARD;
1618 1619 1620
		}
	}

1621
	if (xfs_has_reflink(mp)) {
1622 1623
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
1624
	"reflink not compatible with realtime device!");
1625 1626 1627 1628 1629 1630 1631 1632
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
1633 1634
	}

1635
	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1636
		xfs_alert(mp,
1637 1638 1639
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
1640
	}
1641

1642
	error = xfs_mountfs(mp);
1643
	if (error)
D
Dave Chinner 已提交
1644
		goto out_filestream_unmount;
1645

1646
	root = igrab(VFS_I(mp->m_rootip));
1647
	if (!root) {
D
Dave Chinner 已提交
1648
		error = -ENOENT;
1649
		goto out_unmount;
C
Christoph Hellwig 已提交
1650
	}
1651
	sb->s_root = d_make_root(root);
1652
	if (!sb->s_root) {
D
Dave Chinner 已提交
1653
		error = -ENOMEM;
1654
		goto out_unmount;
L
Linus Torvalds 已提交
1655
	}
1656

L
Linus Torvalds 已提交
1657
	return 0;
D
Dave Chinner 已提交
1658

D
Dave Chinner 已提交
1659
 out_filestream_unmount:
1660
	xfs_filestream_unmount(mp);
1661 1662
 out_free_sb:
	xfs_freesb(mp);
1663 1664
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
1665
 out_destroy_inodegc:
1666
	xfs_mount_list_del(mp);
1667 1668
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
D
Dave Chinner 已提交
1669
	xfs_destroy_percpu_counters(mp);
1670
 out_destroy_workqueues:
1671
	xfs_destroy_mount_workqueues(mp);
1672
 out_close_devices:
1673
	xfs_close_devices(mp);
1674
 out_free_names:
1675
	sb->s_fs_info = NULL;
I
Ian Kent 已提交
1676
	xfs_mount_free(mp);
D
Dave Chinner 已提交
1677
	return error;
1678

1679
 out_unmount:
1680
	xfs_filestream_unmount(mp);
1681
	xfs_unmountfs(mp);
1682
	goto out_free_sb;
L
Linus Torvalds 已提交
1683 1684
}

I
Ian Kent 已提交
1685
static int
1686
xfs_fs_get_tree(
I
Ian Kent 已提交
1687 1688
	struct fs_context	*fc)
{
1689
	return get_tree_bdev(fc, xfs_fs_fill_super);
I
Ian Kent 已提交
1690 1691
}

1692 1693 1694 1695 1696 1697 1698
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

1699
	if (xfs_has_norecovery(mp)) {
1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

1714
	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
1741
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1742 1743
		return error;
	}
1744
	xfs_blockgc_start(mp);
1745 1746 1747 1748 1749 1750

	/* Create the per-AG metadata reservation pool .*/
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

1751 1752 1753
	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766
	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
1767
	xfs_blockgc_stop(mp);
1768 1769

	/* Get rid of any leftover CoW reservations... */
1770
	error = xfs_blockgc_free_space(mp, NULL);
1771 1772 1773 1774 1775
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

1776 1777 1778 1779 1780 1781 1782 1783 1784
	/*
	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800
	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current* reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

B
Brian Foster 已提交
1801
	xfs_log_clean(mp);
1802
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
1820
xfs_fs_reconfigure(
1821 1822 1823 1824 1825 1826 1827 1828
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount        *new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

1829 1830 1831 1832
	/* version 5 superblocks always support version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		fc->sb_flags |= SB_I_VERSION;

1833
	error = xfs_fs_validate_params(new_mp);
1834 1835 1836 1837 1838 1839
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
1840 1841
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1842 1843 1844 1845
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
1846 1847
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1848 1849 1850 1851
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
1852
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1853 1854 1855 1856 1857 1858
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
1859
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1860 1861 1862 1863 1864 1865 1866 1867
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

1868
static void xfs_fs_free(
I
Ian Kent 已提交
1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
1884 1885 1886 1887
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
I
Ian Kent 已提交
1888 1889 1890 1891 1892 1893 1894
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

1895
	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
I
Ian Kent 已提交
1896 1897 1898
	if (!mp)
		return -ENOMEM;

1899 1900 1901 1902 1903
	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
1904
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1905 1906 1907 1908 1909 1910 1911 1912 1913 1914
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

I
Ian Kent 已提交
1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925
	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
1926
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
I
Ian Kent 已提交
1927
	if (fc->sb_flags & SB_DIRSYNC)
1928
		mp->m_features |= XFS_FEAT_DIRSYNC;
I
Ian Kent 已提交
1929
	if (fc->sb_flags & SB_SYNCHRONOUS)
1930
		mp->m_features |= XFS_FEAT_WSYNC;
I
Ian Kent 已提交
1931 1932 1933 1934 1935 1936 1937

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

A
Andrew Morton 已提交
1938
static struct file_system_type xfs_fs_type = {
L
Linus Torvalds 已提交
1939 1940
	.owner			= THIS_MODULE,
	.name			= "xfs",
I
Ian Kent 已提交
1941
	.init_fs_context	= xfs_init_fs_context,
1942
	.parameters		= xfs_fs_parameters,
L
Linus Torvalds 已提交
1943
	.kill_sb		= kill_block_super,
C
Christoph Hellwig 已提交
1944
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
L
Linus Torvalds 已提交
1945
};
1946
MODULE_ALIAS_FS("xfs");
L
Linus Torvalds 已提交
1947

1948 1949 1950
/*
 * Create all of the kmem caches ("zones") used by XFS for frequently
 * allocated objects.  On any failure, every cache created so far is
 * torn down via the goto-unwind chain below and -ENOMEM is returned.
 * The unwind labels are in strict reverse order of creation.
 */
STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
2116 2117 2118 2119 2120
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
2139
}
L
Linus Torvalds 已提交
2140

2141 2142 2143
/*
 * Create the global XFS workqueues.  Returns 0 on success or -ENOMEM,
 * unwinding any workqueue already created.
 */
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

2166
STATIC void
2167 2168
xfs_destroy_workqueues(void)
{
2169
	destroy_workqueue(xfs_discard_wq);
2170
	destroy_workqueue(xfs_alloc_wq);
2171 2172
}

2173 2174 2175 2176 2177
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU-dead notifier: walk every mounted XFS filesystem and let it drain
 * the per-cpu inodegc state of the departing CPU.  The mount list lock
 * is dropped around the per-mount callback; list_for_each_entry_safe
 * keeps the walk valid across the unlock/relock window.
 */
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

/* Register the CPU-dead callback; XFS cannot run without it. */
static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif

L
Linus Torvalds 已提交
2215
STATIC int __init
2216
init_xfs_fs(void)
L
Linus Torvalds 已提交
2217 2218 2219
{
	int			error;

2220 2221
	xfs_check_ondisk_structs();

2222 2223
	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");
L
Linus Torvalds 已提交
2224

2225
	xfs_dir_startup();
L
Linus Torvalds 已提交
2226

2227
	error = xfs_cpu_hotplug_init();
2228 2229 2230
	if (error)
		goto out;

2231 2232 2233 2234
	error = xfs_init_zones();
	if (error)
		goto out_destroy_hp;

2235
	error = xfs_init_workqueues();
2236
	if (error)
C
Christoph Hellwig 已提交
2237
		goto out_destroy_zones;
2238

2239 2240 2241 2242
	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

2243
	error = xfs_buf_init();
2244
	if (error)
2245
		goto out_mru_cache_uninit;
2246 2247 2248 2249 2250 2251 2252 2253

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;
L
Linus Torvalds 已提交
2254

B
Brian Foster 已提交
2255 2256 2257
	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
2258
		goto out_sysctl_unregister;
B
Brian Foster 已提交
2259 2260
	}

2261 2262 2263 2264 2265 2266 2267 2268 2269
	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2270 2271
			       "stats");
	if (error)
2272
		goto out_free_stats;
2273

2274 2275 2276
#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2277
	if (error)
2278
		goto out_remove_stats_kobj;
2279 2280 2281 2282
#endif

	error = xfs_qm_init();
	if (error)
2283
		goto out_remove_dbg_kobj;
L
Linus Torvalds 已提交
2284 2285 2286

	error = register_filesystem(&xfs_fs_type);
	if (error)
2287
		goto out_qm_exit;
L
Linus Torvalds 已提交
2288 2289
	return 0;

2290 2291
 out_qm_exit:
	xfs_qm_exit();
2292
 out_remove_dbg_kobj:
2293 2294
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
2295
 out_remove_stats_kobj:
2296
#endif
2297 2298 2299
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
2300
 out_kset_unregister:
B
Brian Foster 已提交
2301
	kset_unregister(xfs_kset);
2302 2303 2304 2305 2306
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
2307
	xfs_buf_terminate();
2308 2309
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
2310 2311
 out_destroy_wq:
	xfs_destroy_workqueues();
2312
 out_destroy_zones:
2313
	xfs_destroy_zones();
2314 2315
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
2316
 out:
L
Linus Torvalds 已提交
2317 2318 2319 2320
	return error;
}

STATIC void __exit
2321
exit_xfs_fs(void)
L
Linus Torvalds 已提交
2322
{
2323
	xfs_qm_exit();
L
Linus Torvalds 已提交
2324
	unregister_filesystem(&xfs_fs_type);
2325 2326 2327
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
2328 2329
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
B
Brian Foster 已提交
2330
	kset_unregister(xfs_kset);
2331 2332
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
2333
	xfs_buf_terminate();
2334
	xfs_mru_cache_uninit();
2335
	xfs_destroy_workqueues();
2336
	xfs_destroy_zones();
2337
	xfs_uuid_table_free();
2338
	xfs_cpu_hotplug_destroy();
L
Linus Torvalds 已提交
2339 2340 2341 2342 2343 2344 2345 2346
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");