/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>

#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/uaccess.h>

#include "spufs.h"

struct spufs_sb_info {
	int debug;
};

static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;

static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
	return sb->s_fs_info;
}

static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	ei->i_ctx = NULL;
	ei->i_openers = 0;

	return &ei->vfs_inode;
}

static void
spufs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(void *p)
{
	struct spufs_inode_info *ei = p;

	inode_init_once(&ei->vfs_inode);
}

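/*
 * Allocate a bare inode on the spufs superblock: set the requested mode,
 * take ownership from the creating task's fsuid/fsgid and stamp the
 * timestamps.  Returns NULL if the VFS cannot allocate an inode.
 */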
static struct inode *
spufs_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
	return inode;
}

static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}


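/*
 * Create one regular file inside a context directory.  Both i_private and
 * the spufs inode info take a reference on the spu_context, which is only
 * dropped again when the inode is evicted.
 */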
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
		const struct file_operations *fops, int mode,
		size_t size, struct spu_context *ctx)
{
	static const struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;
	int ret;

	ret = -ENOSPC;
	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		goto out;

	ret = 0;
	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_size = size;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
out:
	return ret;
}

static void
spufs_evict_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);
	end_writeback(inode);
	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
}

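/*
 * Drop every instantiated child dentry of a context directory: each one is
 * unhashed under its d_lock and then unlinked with simple_unlink().  The
 * directory's own i_mutex is taken here, not by the caller.
 */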
static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	mutex_lock(&dir->d_inode->i_mutex);
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		spin_lock(&dentry->d_lock);
		if (!(d_unhashed(dentry)) && dentry->d_inode) {
			dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(dir->d_inode, dentry);
			/* XXX: what was dcache_lock protecting here? Other
			 * filesystems (IB, configfs) release dcache_lock
			 * before unlink */
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
		}
	}
	shrink_dcache_parent(dir);
	mutex_unlock(&dir->d_inode->i_mutex);
}

/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	spufs_prune_dir(dir);
	d_drop(dir);

	return simple_rmdir(parent, dir);
}

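/*
 * Instantiate one file per entry of a spufs_tree_descr table inside @dir,
 * masking each entry's mode with the mode passed down from spu_create().
 * On failure the children created so far are torn down again below.
 */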
static int spufs_fill_dir(struct dentry *dir,
		const struct spufs_tree_descr *files, int mode,
		struct spu_context *ctx)
{
	struct dentry *dentry, *tmp;
	int ret;

	while (files->name && files->name[0]) {
		ret = -ENOMEM;
		dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			goto out;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, files->size, ctx);
		if (ret)
			goto out;
		files++;
	}
	return 0;
out:
	/*
	 * remove all children from dir. dir->inode is not set so don't
	 * just simply use spufs_prune_dir() and panic afterwards :)
	 * dput() looks like it will do the right thing:
	 * - dec parent's ref counter
	 * - remove child from parent's child list
	 * - free child's inode if possible
	 * - free child
	 */
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		dput(dentry);
	}

	shrink_dcache_parent(dir);
	return ret;
}

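/*
 * release() for an open context directory: closing the descriptor returned
 * by spu_create() removes the whole context directory again and gives up
 * the mm_struct reference via spu_forget().
 */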
static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct spu_context *ctx;
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = dir->d_parent->d_inode;
	ctx = SPUFS_I(dir->d_inode)->i_ctx;

	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
	ret = spufs_rmdir(parent, dir);
	mutex_unlock(&parent->i_mutex);
	WARN_ON(ret);

	/* We have to give up the mm_struct */
	spu_forget(ctx);

	return dcache_dir_close(inode, file);
}

const struct file_operations spufs_context_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);

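/*
 * Create a context directory: allocate the inode and the spu_context, then
 * populate the directory with the normal or NOSCHED file set and, if the
 * filesystem was mounted with "debug", the extra debug files as well.
 */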
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		int mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx)
		goto out_iput;

	ctx->flags = flags;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					 mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

	if (ret)
		goto out_free_ctx;

	if (spufs_get_sb_info(dir->i_sb)->debug)
		ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
				mode, ctx);

	if (ret)
		goto out_free_ctx;

	d_instantiate(dentry, inode);
	dget(dentry);
	inc_nlink(dir);
	inc_nlink(dentry->d_inode);
	goto out;

out_free_ctx:
	spu_forget(ctx);
	put_spu_context(ctx);
out_iput:
	iput(inode);
out:
	return ret;
}

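/*
 * Open the freshly created context directory and install it as the file
 * descriptor that spu_create() will return.  The dentry and vfsmount
 * references passed in are consumed on the error paths.
 */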
static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
out:
	return ret;
}

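/*
 * Validate an affinity request before the context is created: affinity has
 * to be supported by the hardware, the new context must join an existing
 * gang, only one memory reference context is allowed per gang, and one node
 * needs enough free SPUs to hold the whole affinity chain.  Returns the
 * SPU_CREATE_AFFINITY_SPU neighbour with a reference held, NULL if none was
 * requested, or an ERR_PTR on failure.
 */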
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
						struct file *filp)
{
	struct spu_context *tmp, *neighbor, *err;
	int count, node;
	int aff_supp;

	aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
					struct spu, cbe_list))->aff_list);

	if (!aff_supp)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_GANG)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_AFFINITY_MEM &&
	    gang->aff_ref_ctx &&
	    gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
		return ERR_PTR(-EEXIST);

	if (gang->aff_flags & AFF_MERGED)
		return ERR_PTR(-EBUSY);

	neighbor = NULL;
	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (!filp || filp->f_op != &spufs_context_fops)
			return ERR_PTR(-EINVAL);

		neighbor = get_spu_context(
				SPUFS_I(filp->f_dentry->d_inode)->i_ctx);

		if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
		    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
		    !list_entry(neighbor->aff_list.next, struct spu_context,
		    aff_list)->aff_head) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}

		if (gang != neighbor->gang) {
			err = ERR_PTR(-EINVAL);
			goto out_put_neighbor;
		}

		count = 1;
		list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
			count++;
		if (list_empty(&neighbor->aff_list))
			count++;

		for (node = 0; node < MAX_NUMNODES; node++) {
			if ((cbe_spu_info[node].n_spus - atomic_read(
				&cbe_spu_info[node].reserved_spus)) >= count)
				break;
		}

		if (node == MAX_NUMNODES) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}
	}

	return neighbor;

out_put_neighbor:
	put_spu_context(neighbor);
	return err;
}

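/*
 * Link the new context into its gang's affinity list next to the chosen
 * neighbour, keeping a single aff_head marker per chain, and record the
 * gang's memory reference context where appropriate.
 */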
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
					struct spu_context *neighbor)
{
	if (flags & SPU_CREATE_AFFINITY_MEM)
		ctx->gang->aff_ref_ctx = ctx;

	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (list_empty(&neighbor->aff_list)) {
			list_add_tail(&neighbor->aff_list,
				&ctx->gang->aff_list_head);
			neighbor->aff_head = 1;
		}

		if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
		    || list_entry(neighbor->aff_list.next, struct spu_context,
							aff_list)->aff_head) {
			list_add(&ctx->aff_list, &neighbor->aff_list);
		} else  {
			list_add_tail(&ctx->aff_list, &neighbor->aff_list);
			if (neighbor->aff_head) {
				neighbor->aff_head = 0;
				ctx->aff_head = 1;
			}
		}

		if (!ctx->gang->aff_ref_ctx)
			ctx->gang->aff_ref_ctx = ctx;
	}
}

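/*
 * spu_create() backend for contexts: check the NOSCHED/ISOLATE permissions,
 * validate any affinity request under the gang's aff_mutex, create the
 * directory tree and hand an open file descriptor for it back to the
 * caller.  Called with the parent's i_mutex held; it is dropped on all
 * paths before returning.
 */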
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
			struct vfsmount *mnt, int flags, int mode,
			struct file *aff_filp)
{
	int ret;
	int affinity;
	struct spu_gang *gang;
	struct spu_context *neighbor;

	ret = -EPERM;
	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	ret = -EINVAL;
	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		goto out_unlock;

	ret = -ENODEV;
	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
		goto out_unlock;

	gang = NULL;
	neighbor = NULL;
	affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
	if (affinity) {
		gang = SPUFS_I(inode)->i_gang;
		ret = -EINVAL;
		if (!gang)
			goto out_unlock;
		mutex_lock(&gang->aff_mutex);
		neighbor = spufs_assert_affinity(flags, gang, aff_filp);
		if (IS_ERR(neighbor)) {
			ret = PTR_ERR(neighbor);
			goto out_aff_unlock;
		}
	}

	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
	if (ret)
		goto out_aff_unlock;

	if (affinity) {
		spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
								neighbor);
		if (neighbor)
			put_spu_context(neighbor);
	}

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_context_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		WARN_ON(spufs_rmdir(inode, dentry));
		if (affinity)
			mutex_unlock(&gang->aff_mutex);
		mutex_unlock(&inode->i_mutex);
		spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
		goto out;
	}

out_aff_unlock:
	if (affinity)
		mutex_unlock(&gang->aff_mutex);
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	dput(dentry);
	return ret;
}

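/*
 * Create a gang directory: like spufs_mkdir(), but with a struct spu_gang
 * attached to the inode instead of a context and no files inside.
 */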
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
	int ret;
	struct inode *inode;
	struct spu_gang *gang;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	ret = 0;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	if (!gang)
		goto out_iput;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inc_nlink(dentry->d_inode);
	return ret;

out_iput:
	iput(inode);
out:
	return ret;
}

static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &simple_dir_operations;
	fd_install(ret, filp);
out:
	return ret;
}

static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, int mode)
{
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
	if (ret)
		goto out;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_gang_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		int err = simple_rmdir(inode, dentry);
		WARN_ON(err);
	}

out:
	mutex_unlock(&inode->i_mutex);
	dput(dentry);
	return ret;
}


static struct file_system_type spufs_type;

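/*
 * Entry point used by the spu_create(2) system call: create either a gang
 * directory (SPU_CREATE_GANG) or a context directory below the spufs mount
 * and return an open file descriptor for it.  Gangs can only be created at
 * the root of the mount; contexts at the root or inside a gang.
 *
 * Illustrative userspace call (a sketch only, assuming spufs is mounted on
 * /spu; not part of this file):
 *
 *	int fd = spu_create("/spu/my_context", 0, 0755);
 */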
long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
							struct file *filp)
{
	struct dentry *dentry;
	int ret;

	ret = -EINVAL;
	/* check if we are on spufs */
	if (nd->path.dentry->d_sb->s_type != &spufs_type)
		goto out;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		goto out;

	/* only threads can be underneath a gang */
	if (nd->path.dentry != nd->path.dentry->d_sb->s_root) {
		if ((flags & SPU_CREATE_GANG) ||
		    !SPUFS_I(nd->path.dentry->d_inode)->i_gang)
			goto out;
	}

	dentry = lookup_create(nd, 1);
	ret = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_dir;

	mode &= ~current_umask();

	if (flags & SPU_CREATE_GANG)
		ret = spufs_create_gang(nd->path.dentry->d_inode,
					 dentry, nd->path.mnt, mode);
	else
		ret = spufs_create_context(nd->path.dentry->d_inode,
					    dentry, nd->path.mnt, flags, mode,
					    filp);
	if (ret >= 0)
		fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
	return ret;

out_dir:
	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
	return ret;
}

/* File system initialization */
enum {
	Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
};

static const match_table_t spufs_tokens = {
	{ Opt_uid,   "uid=%d" },
	{ Opt_gid,   "gid=%d" },
	{ Opt_mode,  "mode=%o" },
	{ Opt_debug, "debug" },
	{ Opt_err,    NULL  },
};

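/*
 * Parse the mount options: uid=, gid= and mode= apply to the root inode,
 * "debug" enables the extra debug files in each context directory.
 * Returns 1 on success and 0 on a malformed option.
 *
 * Illustrative mount invocation (assumes the usual /spu mount point):
 *
 *	mount -t spufs -o uid=0,gid=0,mode=0775 none /spu
 */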
static int
spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		int token, option;

		if (!*p)
			continue;

		token = match_token(p, spufs_tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_uid = option;
			break;
		case Opt_gid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_gid = option;
			break;
		case Opt_mode:
			if (match_octal(&args[0], &option))
				return 0;
			root->i_mode = option | S_IFDIR;
			break;
		case Opt_debug:
			spufs_get_sb_info(sb)->debug = 1;
			break;
		default:
			return 0;
		}
	}
	return 1;
}

static void spufs_exit_isolated_loader(void)
{
	free_pages((unsigned long) isolated_loader,
			get_order(isolated_loader_size));
}

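/*
 * Fetch the isolated-mode loader from the "/spu-isolation" device tree node
 * and copy it into page-aligned kernel memory.  If it is missing,
 * SPU_CREATE_ISOLATE requests will later fail with -ENODEV.
 */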
static void
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = of_get_property(dn, "loader", &size);
	if (!loader)
		return;

	/* the loader must be aligned on a 16 byte boundary */
	isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!isolated_loader)
		return;

	isolated_loader_size = size;
	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}

static int
spufs_create_root(struct super_block *sb, void *data)
{
	struct inode *inode;
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	inode = spufs_new_inode(sb, S_IFDIR | 0775);
	if (!inode)
		goto out;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;
	inc_nlink(inode);

	ret = -EINVAL;
	if (!spufs_parse_options(sb, data, inode))
		goto out_iput;

	ret = -ENOMEM;
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_iput;

	return 0;
out_iput:
	iput(inode);
out:
	return ret;
}

static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct spufs_sb_info *info;
	static const struct super_operations s_ops = {
		.alloc_inode = spufs_alloc_inode,
		.destroy_inode = spufs_destroy_inode,
		.statfs = simple_statfs,
		.evict_inode = spufs_evict_inode,
		.show_options = generic_show_options,
	};

	save_mount_options(sb, data);

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &s_ops;
	sb->s_fs_info = info;

	return spufs_create_root(sb, data);
}

static struct dentry *
spufs_mount(struct file_system_type *fstype, int flags,
		const char *name, void *data)
{
	return mount_single(fstype, flags, data, spufs_fill_super);
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.mount = spufs_mount,
	.kill_sb = kill_litter_super,
};

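/*
 * Module init: set up the inode cache and the SPU scheduler, register the
 * filesystem type and the spu syscall table, then load the isolated-mode
 * loader.  Each step is unwound in reverse order on failure.
 */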
static int __init spufs_init(void)
{
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN, spufs_init_once);

	if (!spufs_inode_cache)
		goto out;
	ret = spu_sched_init();
	if (ret)
		goto out_cache;
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_sched;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_fs;

	spufs_init_isolated_loader();

	return 0;

out_fs:
	unregister_filesystem(&spufs_type);
out_sched:
	spu_sched_exit();
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);

static void __exit spufs_exit(void)
{
	spu_sched_exit();
	spufs_exit_isolated_loader();
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");