/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>
#include <linux/magic.h>

#include <asm/ioctls.h>

static struct kmem_cache *watch_cachep __read_mostly;
static struct kmem_cache *event_cachep __read_mostly;

static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_user_watches __read_mostly;
static int inotify_max_queued_events __read_mostly;

/*
 * Lock ordering:
 *
 * inotify_dev->up_mutex (ensures we don't re-add the same watch)
 * 	inode->inotify_mutex (protects inode's watch list)
 * 		inotify_handle->mutex (protects inotify_handle's watch list)
 * 			inotify_dev->ev_mutex (protects device's event queue)
 */
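
/*
 * A rough illustration of that ordering in terms of this file (the inner
 * two mutexes are taken by core inotify, not here): sys_inotify_add_watch()
 * takes dev->up_mutex before calling into the core, the core takes
 * inode->inotify_mutex and then the handle's mutex while adding or updating
 * the watch, and inotify_dev_queue_event() takes dev->ev_mutex innermost
 * when it queues the resulting event.
 */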

/*
 * Lifetimes of the main data structures:
 *
 * inotify_device: Lifetime is managed by reference count, from
 * sys_inotify_init() until release.  Additional references can bump the count
 * via get_inotify_dev() and drop the count via put_inotify_dev().
 *
 * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
 * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
 * first event, or to inotify_destroy().
 */

/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the mutex 'mutex'.
 */
struct inotify_device {
	wait_queue_head_t 	wq;		/* wait queue for i/o */
	struct mutex		ev_mutex;	/* protects event queue */
	struct mutex		up_mutex;	/* synchronizes watch updates */
	struct list_head 	events;		/* list of queued events */
	struct user_struct	*user;		/* user who opened this dev */
	struct inotify_handle	*ih;		/* inotify handle */
	struct fasync_struct    *fa;            /* async notification */
	atomic_t		count;		/* reference count */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->ev_mutex of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head        list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_user_watch - our version of an inotify_watch, we add
 * a reference to the associated inotify_device.
 */
struct inotify_user_watch {
	struct inotify_device	*dev;	/* associated device */
	struct inotify_watch	wdata;	/* inotify watch data */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
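
/*
 * For illustration: with CONFIG_SYSCTL the table above is wired into the
 * "fs" sysctl tree (the hookup lives outside this file), so the limits are
 * tunable at run time via e.g.
 *
 *	/proc/sys/fs/inotify/max_user_instances
 *	/proc/sys/fs/inotify/max_user_watches
 *	/proc/sys/fs/inotify/max_queued_events
 */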

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}

/*
 * free_inotify_user_watch - cleans up the watch and its references
 */
static void free_inotify_user_watch(struct inotify_watch *w)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	atomic_dec(&dev->user->inotify_watches);
	put_inotify_dev(dev);
	kmem_cache_free(watch_cachep, watch);
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
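		 *
		 * As a worked example, assuming sizeof(struct inotify_event)
		 * is 16 bytes: "foo" gives len = 4 and rem = 12, so event.len
		 * is 16; a 20-character name gives len = 21 and
		 * rem = 16 - (21 % 16) = 11, so event.len is 32.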
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_get_last_event - return the last event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_last_event(struct inotify_device *dev)
{
	if (list_empty(&dev->events))
		return NULL;
	return list_entry(dev->events.prev, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - event handler registered with core inotify, adds
 * a new event to the given device
 *
 * Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
				    u32 cookie, const char *name,
				    struct inode *ignored)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;
	struct inotify_kernel_event *kevent, *last;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	mutex_lock(&dev->ev_mutex);

	/* we can safely put the watch as we don't reference it while
	 * generating the event
	 */
	if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
		put_inotify_watch(w); /* final put */

	/* coalescing: drop this event if it is a dupe of the previous */
	last = inotify_dev_get_last_event(dev);
	if (last && last->event.mask == mask && last->event.wd == wd &&
			last->event.cookie == cookie) {
		const char *lastname = last->name;

		if (!name && !lastname)
			goto out;
		if (name && lastname && !strcmp(lastname, name))
			goto out;
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		goto out;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(wd, mask, cookie, name);

	if (unlikely(!kevent))
		goto out;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
	kill_fasync(&dev->fa, SIGIO, POLL_IN);

out:
	mutex_unlock(&dev->ev_mutex);
}

/*
 * remove_kevent - cleans up the given kevent
 *
 * Caller must hold dev->ev_mutex.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
}

/*
 * free_kevent - frees the given kevent.
 */
static void free_kevent(struct inotify_kernel_event *kevent)
{
	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->ev_mutex.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
		free_kevent(kevent);
	}
}

/*
 * find_inode - resolve a user-given path to a specific inode
 */
static int find_inode(const char __user *dirname, struct path *path,
		      unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->up_mutex.
 */
static int create_watch(struct inotify_device *dev, struct inode *inode,
			u32 mask)
{
	struct inotify_user_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return -ENOSPC;

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return -ENOMEM;

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	atomic_inc(&dev->user->inotify_watches);

	inotify_init_watch(&watch->wdata);
	ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
	if (ret < 0)
		free_inotify_user_watch(&watch->wdata);

	return ret;
}

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	mutex_lock(&dev->ev_mutex);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&dev->ev_mutex);

	return ret;
}
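
/*
 * A minimal sketch of the read-side contract (illustrative user-space
 * snippet, not part of this file; inotify_fd is whatever sys_inotify_init()
 * returned): a buffer of sizeof(struct inotify_event) + NAME_MAX + 1 bytes
 * is always large enough for one event, and inotify_read() below returns
 * -EINVAL rather than truncating an event that cannot fit in the buffer it
 * was given.
 *
 *	char buf[sizeof(struct inotify_event) + NAME_MAX + 1];
 *	ssize_t n = read(inotify_fd, buf, sizeof(buf));
 */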

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof (struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&dev->ev_mutex);
		if (!list_empty(&dev->events)) {
			ret = 0;
			break;
		}
		mutex_unlock(&dev->ev_mutex);

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count) {
			if (ret == 0 && count > 0) {
				/*
				 * could not get a single event because we
				 * didn't have enough buffer space.
				 */
				ret = -EINVAL;
			}
			break;
		}
		remove_kevent(dev, kevent);

		/*
		 * Must perform the copy_to_user outside the mutex in order
		 * to avoid a lock order reversal with mmap_sem.
		 */
		mutex_unlock(&dev->ev_mutex);

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)){
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		free_kevent(kevent);

		mutex_lock(&dev->ev_mutex);
	}
	mutex_unlock(&dev->ev_mutex);

	return ret;
}

static int inotify_fasync(int fd, struct file *file, int on)
{
	struct inotify_device *dev = file->private_data;

	return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	inotify_destroy(dev->ih);

	/* destroy all of the events on this device */
	mutex_lock(&dev->ev_mutex);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	mutex_unlock(&dev->ev_mutex);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll           = inotify_poll,
	.read           = inotify_read,
	.fasync         = inotify_fasync,
	.release        = inotify_release,
	.unlocked_ioctl = inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

static const struct inotify_operations inotify_user_ops = {
	.handle_event	= inotify_dev_queue_event,
	.destroy_watch	= free_inotify_user_watch,
};

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct inotify_device *dev;
	struct inotify_handle *ih;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	ih = inotify_init(&inotify_user_ops);
	if (IS_ERR(ih)) {
		ret = PTR_ERR(ih);
		goto out_free_dev;
	}
	dev->ih = ih;
	dev->fa = NULL;

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = dev;

	INIT_LIST_HEAD(&dev->events);
	init_waitqueue_head(&dev->wq);
	mutex_init(&dev->ev_mutex);
	mutex_init(&dev->up_mutex);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_dev:
	kfree(dev);
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct inode *inode;
	struct inotify_device *dev;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = find_inode(pathname, &path, flags);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to path; dev by fget on fd */
	inode = path.dentry->d_inode;
	dev = filp->private_data;

	mutex_lock(&dev->up_mutex);
	ret = inotify_find_update_watch(dev->ih, inode, mask);
	if (ret == -ENOENT)
		ret = create_watch(dev, inode, mask);
	mutex_unlock(&dev->up_mutex);

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;

	/* we free our watch data when we get IN_IGNORED */
	ret = inotify_rm_wd(dev->ih, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}
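
/*
 * A minimal user-space sketch of how the three syscalls above fit together
 * (illustrative only, error handling omitted):
 *
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	read(fd, buf, sizeof(buf));	- yields struct inotify_event records
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */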

static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
    .name           = "inotifyfs",
    .get_sb         = inotify_get_sb,
    .kill_sb        = kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_user_watch),
					 0, SLAB_PANIC, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL);

	return 0;
}

module_init(inotify_user_setup);