xfs_super.c 59.3 KB
Newer Older
D
Dave Chinner 已提交
1
// SPDX-License-Identifier: GPL-2.0
L
Linus Torvalds 已提交
2
/*
3
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4
 * All Rights Reserved.
L
Linus Torvalds 已提交
5
 */
C
Christoph Hellwig 已提交
6

L
Linus Torvalds 已提交
7
#include "xfs.h"
8
#include "xfs_shared.h"
9
#include "xfs_format.h"
10 11
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
L
Linus Torvalds 已提交
12 13 14
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
15
#include "xfs_btree.h"
L
Linus Torvalds 已提交
16
#include "xfs_bmap.h"
17
#include "xfs_alloc.h"
C
Christoph Hellwig 已提交
18
#include "xfs_fsops.h"
19
#include "xfs_trans.h"
L
Linus Torvalds 已提交
20
#include "xfs_buf_item.h"
21
#include "xfs_log.h"
22
#include "xfs_log_priv.h"
23
#include "xfs_dir2.h"
24 25 26
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
27
#include "xfs_icache.h"
C
Christoph Hellwig 已提交
28
#include "xfs_trace.h"
D
Dave Chinner 已提交
29
#include "xfs_icreate_item.h"
30 31
#include "xfs_filestream.h"
#include "xfs_quota.h"
32
#include "xfs_sysfs.h"
33
#include "xfs_ondisk.h"
34
#include "xfs_rmap_item.h"
35
#include "xfs_refcount_item.h"
36
#include "xfs_bmap_item.h"
37
#include "xfs_reflink.h"
38
#include "xfs_pwork.h"
39
#include "xfs_ag.h"
L
Linus Torvalds 已提交
40

41
#include <linux/magic.h>
I
Ian Kent 已提交
42 43
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
L
Linus Torvalds 已提交
44

45
/* Forward declaration; the operations table itself is defined later in this file. */
static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif
L
Linus Torvalds 已提交
51

52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Global list of all mounted XFS filesystems, protected by
 * xfs_mount_list_lock.  Presumably walked by the CPU hotplug callback to
 * service per-cpu state on each mount; the consumer is not visible in this
 * chunk — confirm against the hotplug notifier registration.
 */
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

/* Add a mount to the global list under the list lock. */
static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

/* Remove a mount from the global list under the list lock. */
static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
/* No CPU hotplug: tracking mounts globally is unnecessary. */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106
/* DAX behaviour selected by the dax= mount option. */
enum xfs_dax_mode {
	XFS_DAX_INODE = 0,	/* per-inode flag decides (dax=inode, default) */
	XFS_DAX_ALWAYS = 1,	/* force DAX on for all regular files */
	XFS_DAX_NEVER = 2,	/* force DAX off everywhere */
};

/*
 * Translate the requested DAX mode into the pair of mount flags
 * XFS_MOUNT_DAX_ALWAYS / XFS_MOUNT_DAX_NEVER; the two flags are kept
 * mutually exclusive, and clearing both means per-inode selection.
 */
static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
		break;
	}
}

/* Accepted string values for the dax= enum mount parameter. */
static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

107 108 109 110
/*
 * Table driven mount option parser.
 *
 * The enum tokens below index into xfs_fs_parameters, which is consumed by
 * the VFS fs_context/fs_parser infrastructure.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	/* "dax" appears twice: as a bare flag and as dax=<inode|always|never> */
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

167
/* Maps a mount flag bit to the option string shown in /proc/mounts. */
struct proc_xfs_info {
	uint64_t	flag;	/* XFS_MOUNT_* bit; 0 terminates the table */
	char		*str;	/* option text, including leading comma */
};

172 173 174 175
/*
 * ->show_options superblock operation: emit the non-default mount options of
 * this filesystem into @m for /proc/mounts.  Flag options come from a simple
 * bit->string table; valued options (logbufs, sunit, quota state, ...) are
 * printed explicitly afterwards.  Always returns 0.
 */
static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	/* inode32/inode64 is always shown, derived from SMALL_INUMS */
	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	/* seq_show_option() escapes the device path for us */
	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	/* stripe unit/width are stored in fsblocks; report in 512B blocks */
	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	/* per quota type: enforcing beats accounting-only */
	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
246

247
/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;	/* count of AGs allowed to hold inodes */
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;	/* first AGs reserved for metadata */
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		/* icount = ceil(dblocks * imax_pct / 100 / agblocks) AGs */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	/* Mark each AG as inode-capable and/or metadata-preferred. */
	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		/* highest inode number this AG could contain */
		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				/* AG lies beyond 32-bit inode space */
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			/* inode64: every AG may hold inodes */
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}

H
Hannes Eder 已提交
334
/*
 * Open the block device at @name exclusively (holder is @mp) for read/write
 * and return it in *@bdevp.  Returns 0 on success or a negative errno,
 * logging a warning on failure.
 */
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

H
Hannes Eder 已提交
352
STATIC void
L
Linus Torvalds 已提交
353 354 355 356
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
357
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
L
Linus Torvalds 已提交
358 359
}

360 361 362 363
/*
 * Tear down all buffer targets and release the underlying block and DAX
 * device references taken at mount time.  The buftarg must be freed before
 * the bdev/dax references it holds are dropped.  Note: the data device's
 * bdev itself is not put here — presumably released by the generic
 * superblock teardown; confirm against the caller.
 */
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 *
 * Returns 0 on success or a negative errno; on failure every device and
 * DAX reference taken here is released again via the goto-unwind chain.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		/* the realtime device must be distinct from data and log */
		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		/* internal log: share the data device's buftarg */
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

472 473 474 475 476 477 478 479
/*
 * Setup xfs_mount buffer target pointers based on superblock
 *
 * Sets the sector size of each buftarg from the on-disk superblock; an
 * external log uses its own sector size when the sector feature bit is set,
 * otherwise the basic block size (BBSIZE).  Returns 0 or a negative errno
 * from xfs_setsize_buftarg().
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
504

505 506 507 508
/*
 * Allocate the per-mount workqueues.  On any allocation failure all
 * previously created workqueues are destroyed in reverse order and
 * -ENOMEM is returned.  XFS_WQFLAGS presumably ORs in globally configured
 * workqueue flags — confirm against its definition elsewhere.
 */
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	/* max_active = 1: buffer I/O completion is serialised */
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

/*
 * Destroy the per-mount workqueues in the reverse order of their creation
 * in xfs_init_mount_workqueues().
 */
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

581 582 583 584 585 586 587 588 589 590 591 592 593 594
/*
 * Work item backing xfs_flush_inodes(): sync all dirty inodes of the
 * superblock.  s_umount is only trylocked so we cannot deadlock against an
 * in-progress unmount; if it is held, the flush is simply skipped.
 */
static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

D
Dave Chinner 已提交
595 596 597 598 599 600 601 602 603 604
/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	/* Kick a fresh flush and wait for it to finish. */
	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

616
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	/*
	 * XFS allocates inodes through its own icache, never via the VFS
	 * ->alloc_inode path, so reaching this is a programming error.
	 */
	BUG();
	return NULL;
}

625
/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	/* nobody may still hold i_rwsem at this point */
	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	/* hand the inode to the XFS reclaim machinery */
	xfs_inode_mark_reclaimable(ip);
}

643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664
/*
 * ->dirty_inode superblock operation.  Only acts for lazytime mounts when a
 * timestamp-only dirty inode is being flushed (I_DIRTY_SYNC while
 * I_DIRTY_TIME is still set): log the timestamps in a transaction so they
 * become persistent.  Transaction allocation failure is silently ignored —
 * this is a best-effort update.
 */
static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

665 666 667 668
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

694 695 696 697 698 699 700 701 702 703 704 705 706
/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode);
}

I
Ian Kent 已提交
720 721
static void
xfs_mount_free(
722 723 724 725
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
I
Ian Kent 已提交
726
	kmem_free(mp);
727 728
}

L
Linus Torvalds 已提交
729
/*
 * ->sync_fs superblock operation: force the log to disk on the synchronous
 * pass of a sync/syncfs, and shut down inodegc when called as part of the
 * freeze sequence.  Always returns 0.
 */
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT)
		xfs_inodegc_stop(mp);

	return 0;
}

/*
 * ->statfs superblock operation: fill in @statp for statfs(2).  Block and
 * inode counts come from the per-cpu counters (summed), the geometry from
 * the in-core superblock under m_sb_lock.  Project-quota'd directories and
 * realtime files get their counts overridden at the end.  Always returns 0.
 */
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	/* summing the per-cpu counters is expensive but exact */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	/* free space could still be turned into inodes */
	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);


	/* report project quota limits for PROJINHERIT dirs with enforcement */
	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	/* realtime files report the realtime subvolume's space */
	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

E
Eric Sandeen 已提交
840 841 842
/*
 * Remember the current block reservation in m_resblks_save and release it
 * (reserve 0 blocks) — used while quiescing/freezing the filesystem.
 */
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
852
	uint64_t resblks;
E
Eric Sandeen 已提交
853 854 855 856 857 858 859 860 861 862

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

863 864 865 866 867 868 869 870 871 872 873
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 *
 * Returns the result of xfs_log_quiesce().
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_blockgc_stop(mp);
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !(mp->m_flags & XFS_MOUNT_RDONLY))
		xfs_inodegc_start(mp);

	return ret;
}

/*
 * Thaw the filesystem: restore the block reservation and restart the
 * background services (log work, blockgc, inodegc) that the freeze path
 * stopped.  Always returns 0.
 */
STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY))
		xfs_inodegc_start(mp);

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 *
 * Validates the mount options against the on-disk feature bits and returns
 * 0, -EINVAL on incompatible options, or -EROFS for a r/w mount of a
 * read-only filesystem.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			/* no logbsize given: grow it to the stripe size */
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	/* simultaneous group and project quota need a pquotino on disk */
	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

/*
 * Allocate the per-cpu superblock counters (inode count, free inodes,
 * free data blocks, delalloc blocks).  On any failure, tear down the
 * counters already initialized and return -ENOMEM.
 */
static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto out_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto out_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto out_fdblocks;

	return 0;

out_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
out_ifree:
	percpu_counter_destroy(&mp->m_ifree);
out_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

/*
 * Free all per-cpu superblock counters.  Unless the filesystem was shut
 * down, all delalloc blocks must have been converted or freed by the
 * time we get here, so assert the delalloc counter has drained to zero.
 */
static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
	percpu_counter_destroy(&mp->m_fdblocks);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_icount);
}

1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071
static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087
static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
1088
	xfs_mount_list_del(mp);
1089
	xfs_inodegc_free_percpu(mp);
1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

/*
 * Shrinker callback: report how many reclaimable cached inodes we hold.
 */
static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp;

	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;

	mp = XFS_M(sb);
	return xfs_reclaim_inodes_count(mp);
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
1113
{
1114 1115
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
1116

1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
1131

1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164
static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
1165 1166
}

1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182
static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
			!!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

C
Christoph Hellwig 已提交
1183
/*
1184 1185 1186
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
C
Christoph Hellwig 已提交
1187
 */
1188
static int
1189
xfs_fs_parse_param(
1190 1191
	struct fs_context	*fc,
	struct fs_parameter	*param)
L
Linus Torvalds 已提交
1192
{
1193
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1194 1195 1196
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;
C
Christoph Hellwig 已提交
1197

1198
	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1199 1200
	if (opt < 0)
		return opt;
L
Linus Torvalds 已提交
1201

1202 1203
	switch (opt) {
	case Opt_logbufs:
1204
		parsing_mp->m_logbufs = result.uint_32;
1205 1206
		return 0;
	case Opt_logbsize:
1207
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1208 1209 1210
			return -EINVAL;
		return 0;
	case Opt_logdev:
1211 1212 1213
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
1214 1215 1216
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
1217 1218 1219
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
1220 1221 1222 1223 1224
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
1225 1226
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1227 1228 1229
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
1230
		parsing_mp->m_flags |= XFS_MOUNT_GRPID;
1231 1232 1233
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
1234
		parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
1235 1236
		return 0;
	case Opt_wsync:
1237
		parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
1238 1239
		return 0;
	case Opt_norecovery:
1240
		parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
1241 1242
		return 0;
	case Opt_noalign:
1243
		parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
1244 1245
		return 0;
	case Opt_swalloc:
1246
		parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
1247 1248
		return 0;
	case Opt_sunit:
1249
		parsing_mp->m_dalign = result.uint_32;
1250 1251
		return 0;
	case Opt_swidth:
1252
		parsing_mp->m_swidth = result.uint_32;
1253 1254
		return 0;
	case Opt_inode32:
1255
		parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1256 1257
		return 0;
	case Opt_inode64:
1258
		parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1259 1260
		return 0;
	case Opt_nouuid:
1261
		parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
1262 1263
		return 0;
	case Opt_largeio:
1264
		parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
1265 1266
		return 0;
	case Opt_nolargeio:
1267
		parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1268 1269
		return 0;
	case Opt_filestreams:
1270
		parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1271 1272
		return 0;
	case Opt_noquota:
1273 1274
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1275 1276 1277 1278
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
1279
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1280 1281 1282
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
1283
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1284
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1285 1286 1287
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
1288
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1289 1290
		return 0;
	case Opt_pqnoenforce:
1291
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1292
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1293 1294 1295
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
1296
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1297 1298
		return 0;
	case Opt_gqnoenforce:
1299
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1300
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1301 1302
		return 0;
	case Opt_discard:
1303
		parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
1304 1305
		return 0;
	case Opt_nodiscard:
1306
		parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
1307 1308 1309
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
1310
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1311 1312
		return 0;
	case Opt_dax_enum:
1313
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1314 1315
		return 0;
#endif
1316 1317
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
1318
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
1319
		parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
1320 1321
		return 0;
	case Opt_noikeep:
1322
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
1323
		parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
1324 1325
		return 0;
	case Opt_attr2:
1326
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
1327
		parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
1328 1329
		return 0;
	case Opt_noattr2:
1330
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
1331 1332
		parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
		parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
1333
		return 0;
1334
	default:
1335
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1336 1337
		return -EINVAL;
	}
E
Eric Sandeen 已提交
1338 1339 1340 1341

	return 0;
}

1342
static int
1343
xfs_fs_validate_params(
1344 1345
	struct xfs_mount	*mp)
{
1346
	/*
1347
	 * no recovery flag requires a read-only mount
1348
	 */
1349 1350 1351
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
D
Dave Chinner 已提交
1352
		return -EINVAL;
1353 1354
	}

1355 1356
	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
1357
		xfs_warn(mp,
1358 1359
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
1360 1361
	}

1362 1363
	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
D
Dave Chinner 已提交
1364
		return -EINVAL;
1365 1366
	}

1367 1368 1369 1370 1371
	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}
D
Dave Chinner 已提交
1372

1373 1374 1375 1376 1377 1378
	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}
D
Dave Chinner 已提交
1379

1380 1381 1382 1383 1384 1385 1386 1387
	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}
D
Dave Chinner 已提交
1388

1389 1390 1391 1392 1393 1394 1395 1396 1397 1398
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}
D
Dave Chinner 已提交
1399

1400 1401 1402 1403 1404 1405 1406
	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}
1407

D
Dave Chinner 已提交
1408
	return 0;
1409
}
1410

I
Ian Kent 已提交
1411
static int
1412
xfs_fs_fill_super(
1413
	struct super_block	*sb,
I
Ian Kent 已提交
1414
	struct fs_context	*fc)
1415
{
I
Ian Kent 已提交
1416
	struct xfs_mount	*mp = sb->s_fs_info;
1417
	struct inode		*root;
1418
	int			flags = 0, error;
1419

I
Ian Kent 已提交
1420
	mp->m_super = sb;
L
Linus Torvalds 已提交
1421

1422
	error = xfs_fs_validate_params(mp);
1423
	if (error)
1424
		goto out_free_names;
L
Linus Torvalds 已提交
1425 1426

	sb_min_blocksize(sb, BBSIZE);
1427
	sb->s_xattr = xfs_xattr_handlers;
1428
	sb->s_export_op = &xfs_export_operations;
1429
#ifdef CONFIG_XFS_QUOTA
1430
	sb->s_qcop = &xfs_quotactl_operations;
J
Jan Kara 已提交
1431
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1432
#endif
1433
	sb->s_op = &xfs_super_operations;
L
Linus Torvalds 已提交
1434

D
Dave Chinner 已提交
1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumention to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

I
Ian Kent 已提交
1446
	if (fc->sb_flags & SB_SILENT)
1447 1448
		flags |= XFS_MFSI_QUIET;

1449
	error = xfs_open_devices(mp);
1450
	if (error)
1451
		goto out_free_names;
1452

D
Dave Chinner 已提交
1453
	error = xfs_init_mount_workqueues(mp);
1454 1455
	if (error)
		goto out_close_devices;
C
Christoph Hellwig 已提交
1456

D
Dave Chinner 已提交
1457
	error = xfs_init_percpu_counters(mp);
1458 1459 1460
	if (error)
		goto out_destroy_workqueues;

1461 1462 1463 1464
	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

1465 1466 1467 1468 1469 1470 1471
	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

1472 1473 1474
	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
1475
		error = -ENOMEM;
1476
		goto out_destroy_inodegc;
1477 1478
	}

1479 1480
	error = xfs_readsb(mp, flags);
	if (error)
1481
		goto out_free_stats;
1482 1483

	error = xfs_finish_flags(mp);
1484
	if (error)
1485
		goto out_free_sb;
1486

1487
	error = xfs_setup_devices(mp);
1488
	if (error)
1489
		goto out_free_sb;
1490

D
Darrick J. Wong 已提交
1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503
	/* V4 support is undergoing deprecation. */
	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

1504 1505 1506 1507 1508 1509 1510
	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542
	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553
	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
1554
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1555 1556 1557 1558 1559 1560 1561 1562
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

1563 1564
	error = xfs_filestream_mount(mp);
	if (error)
1565
		goto out_free_sb;
1566

1567 1568 1569 1570
	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
1571
	sb->s_magic = XFS_SUPER_MAGIC;
C
Christoph Hellwig 已提交
1572 1573
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1574
	sb->s_maxbytes = MAX_LFS_FILESIZE;
1575
	sb->s_max_links = XFS_MAXLINK;
L
Linus Torvalds 已提交
1576
	sb->s_time_gran = 1;
1577 1578 1579 1580 1581 1582 1583
	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
D
Darrick J. Wong 已提交
1584
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1585 1586
	sb->s_iflags |= SB_I_CGROUPWB;

L
Linus Torvalds 已提交
1587 1588
	set_posix_acl_flag(sb);

D
Dave Chinner 已提交
1589 1590
	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
M
Matthew Garrett 已提交
1591
		sb->s_flags |= SB_I_VERSION;
D
Dave Chinner 已提交
1592

1593 1594 1595 1596
	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");

1597
	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
1598
		bool rtdev_is_dax = false, datadev_is_dax;
1599

D
Dave Chinner 已提交
1600
		xfs_warn(mp,
1601 1602
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

1603 1604
		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
1605
		if (mp->m_rtdev_targp)
1606 1607 1608
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
D
Dave Chinner 已提交
1609
			xfs_alert(mp,
1610
			"DAX unsupported by block device. Turning off DAX.");
1611
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
D
Dave Chinner 已提交
1612
		}
1613
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1614
			xfs_alert(mp,
1615
		"DAX and reflink cannot be used together!");
1616 1617 1618
			error = -EINVAL;
			goto out_filestream_unmount;
		}
D
Dave Chinner 已提交
1619 1620
	}

1621 1622 1623 1624 1625 1626 1627 1628 1629 1630
	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

1631 1632 1633
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
1634
	"reflink not compatible with realtime device!");
1635 1636 1637 1638 1639 1640 1641 1642
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
1643 1644
	}

1645
	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1646
		xfs_alert(mp,
1647 1648 1649
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
1650
	}
1651

1652 1653 1654 1655
	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");

1656
	error = xfs_mountfs(mp);
1657
	if (error)
D
Dave Chinner 已提交
1658
		goto out_filestream_unmount;
1659

1660
	root = igrab(VFS_I(mp->m_rootip));
1661
	if (!root) {
D
Dave Chinner 已提交
1662
		error = -ENOENT;
1663
		goto out_unmount;
C
Christoph Hellwig 已提交
1664
	}
1665
	sb->s_root = d_make_root(root);
1666
	if (!sb->s_root) {
D
Dave Chinner 已提交
1667
		error = -ENOMEM;
1668
		goto out_unmount;
L
Linus Torvalds 已提交
1669
	}
1670

L
Linus Torvalds 已提交
1671
	return 0;
D
Dave Chinner 已提交
1672

D
Dave Chinner 已提交
1673
 out_filestream_unmount:
1674
	xfs_filestream_unmount(mp);
1675 1676
 out_free_sb:
	xfs_freesb(mp);
1677 1678
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
1679
 out_destroy_inodegc:
1680
	xfs_mount_list_del(mp);
1681 1682
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
D
Dave Chinner 已提交
1683
	xfs_destroy_percpu_counters(mp);
1684
 out_destroy_workqueues:
1685
	xfs_destroy_mount_workqueues(mp);
1686
 out_close_devices:
1687
	xfs_close_devices(mp);
1688
 out_free_names:
1689
	sb->s_fs_info = NULL;
I
Ian Kent 已提交
1690
	xfs_mount_free(mp);
D
Dave Chinner 已提交
1691
	return error;
1692

1693
 out_unmount:
1694
	xfs_filestream_unmount(mp);
1695
	xfs_unmountfs(mp);
1696
	goto out_free_sb;
L
Linus Torvalds 已提交
1697 1698
}

I
Ian Kent 已提交
1699
static int
1700
xfs_fs_get_tree(
I
Ian Kent 已提交
1701 1702
	struct fs_context	*fc)
{
1703
	return get_tree_bdev(fc, xfs_fs_fill_super);
I
Ian Kent 已提交
1704 1705
}

1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
1755
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1756 1757
		return error;
	}
1758
	xfs_blockgc_start(mp);
1759 1760 1761 1762 1763 1764

	/* Create the per-AG metadata reservation pool .*/
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

1765 1766 1767
	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780
	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
1781
	xfs_blockgc_stop(mp);
1782 1783

	/* Get rid of any leftover CoW reservations... */
1784
	error = xfs_blockgc_free_space(mp, NULL);
1785 1786 1787 1788 1789
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

1790 1791 1792 1793 1794 1795 1796 1797 1798
	/*
	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814
	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current* reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

B
Brian Foster 已提交
1815
	xfs_log_clean(mp);
1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
1834
xfs_fs_reconfigure(
1835 1836 1837 1838 1839 1840 1841 1842
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount        *new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

1843 1844 1845 1846
	/* version 5 superblocks always support version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		fc->sb_flags |= SB_I_VERSION;

1847
	error = xfs_fs_validate_params(new_mp);
1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

1884
static void xfs_fs_free(
I
Ian Kent 已提交
1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
1900 1901 1902 1903
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
I
Ian Kent 已提交
1904 1905 1906 1907 1908 1909 1910
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

1911
	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
I
Ian Kent 已提交
1912 1913 1914
	if (!mp)
		return -ENOMEM;

1915 1916 1917 1918 1919
	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
1920
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1921 1922 1923 1924 1925 1926 1927 1928 1929 1930
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

I
Ian Kent 已提交
1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953
	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

A
Andrew Morton 已提交
1954
static struct file_system_type xfs_fs_type = {
L
Linus Torvalds 已提交
1955 1956
	.owner			= THIS_MODULE,
	.name			= "xfs",
I
Ian Kent 已提交
1957
	.init_fs_context	= xfs_init_fs_context,
1958
	.parameters		= xfs_fs_parameters,
L
Linus Torvalds 已提交
1959
	.kill_sb		= kill_block_super,
C
Christoph Hellwig 已提交
1960
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
L
Linus Torvalds 已提交
1961
};
1962
MODULE_ALIAS_FS("xfs");
/*
 * Create all of the slab caches ("zones") used by XFS.  On failure the
 * caches created so far are torn down in reverse order and -ENOMEM is
 * returned; on success all zone pointers are valid.
 */
STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	/* EFD/EFI items embed a fast-path array of extents in the item. */
	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
2132 2133 2134 2135 2136
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
2155
}
/*
 * Create the global XFS workqueues.  Returns 0 on success or -ENOMEM,
 * undoing any queue already created.
 */
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
2183 2184
xfs_destroy_workqueues(void)
{
2185
	destroy_workqueue(xfs_discard_wq);
2186
	destroy_workqueue(xfs_alloc_wq);
2187 2188
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU dead notifier: walk every mounted filesystem and let the per-mount
 * inodegc machinery migrate work off the departed CPU.  The list lock is
 * dropped around the per-mount callback; list_for_each_entry_safe() keeps
 * the walk valid if the current entry is unmounted meanwhile.
 */
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

/* Register the CPU-dead callback; XFS is unusable if this fails. */
static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif

STATIC int __init
2232
init_xfs_fs(void)
L
Linus Torvalds 已提交
2233 2234 2235
{
	int			error;

2236 2237
	xfs_check_ondisk_structs();

2238 2239
	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");
L
Linus Torvalds 已提交
2240

2241
	xfs_dir_startup();
L
Linus Torvalds 已提交
2242

2243
	error = xfs_cpu_hotplug_init();
2244 2245 2246
	if (error)
		goto out;

2247 2248 2249 2250
	error = xfs_init_zones();
	if (error)
		goto out_destroy_hp;

2251
	error = xfs_init_workqueues();
2252
	if (error)
C
Christoph Hellwig 已提交
2253
		goto out_destroy_zones;
2254

2255 2256 2257 2258
	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

2259
	error = xfs_buf_init();
2260
	if (error)
2261
		goto out_mru_cache_uninit;
2262 2263 2264 2265 2266 2267 2268 2269

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;
L
Linus Torvalds 已提交
2270

B
Brian Foster 已提交
2271 2272 2273
	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
2274
		goto out_sysctl_unregister;
B
Brian Foster 已提交
2275 2276
	}

2277 2278 2279 2280 2281 2282 2283 2284 2285
	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2286 2287
			       "stats");
	if (error)
2288
		goto out_free_stats;
2289

2290 2291 2292
#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2293
	if (error)
2294
		goto out_remove_stats_kobj;
2295 2296 2297 2298
#endif

	error = xfs_qm_init();
	if (error)
2299
		goto out_remove_dbg_kobj;
L
Linus Torvalds 已提交
2300 2301 2302

	error = register_filesystem(&xfs_fs_type);
	if (error)
2303
		goto out_qm_exit;
L
Linus Torvalds 已提交
2304 2305
	return 0;

2306 2307
 out_qm_exit:
	xfs_qm_exit();
2308
 out_remove_dbg_kobj:
2309 2310
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
2311
 out_remove_stats_kobj:
2312
#endif
2313 2314 2315
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
2316
 out_kset_unregister:
B
Brian Foster 已提交
2317
	kset_unregister(xfs_kset);
2318 2319 2320 2321 2322
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
2323
	xfs_buf_terminate();
2324 2325
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
2326 2327
 out_destroy_wq:
	xfs_destroy_workqueues();
2328
 out_destroy_zones:
2329
	xfs_destroy_zones();
2330 2331
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
2332
 out:
L
Linus Torvalds 已提交
2333 2334 2335 2336
	return error;
}

STATIC void __exit
2337
exit_xfs_fs(void)
L
Linus Torvalds 已提交
2338
{
2339
	xfs_qm_exit();
L
Linus Torvalds 已提交
2340
	unregister_filesystem(&xfs_fs_type);
2341 2342 2343
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
2344 2345
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
B
Brian Foster 已提交
2346
	kset_unregister(xfs_kset);
2347 2348
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
2349
	xfs_buf_terminate();
2350
	xfs_mru_cache_uninit();
2351
	xfs_destroy_workqueues();
2352
	xfs_destroy_zones();
2353
	xfs_uuid_table_free();
2354
	xfs_cpu_hotplug_destroy();
L
Linus Torvalds 已提交
2355 2356 2357 2358 2359 2360 2361 2362
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");