/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/magic.h> /* superblock magic number */
#include <linux/mount.h> /* mntget */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/path.h> /* struct path */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

static struct vfsmount *inotify_mnt __read_mostly;

/* this just sits here and wastes global memory.  used to just pad userspace messages with zeros */
static struct inotify_event nul_inotify_event;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */

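/*
 * Convert the userspace IN_* bits from inotify_add_watch() into the FS_*
 * mask used by the fsnotify backend (the bit values are defined to match).
 */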
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* everything should accept its own ignored event and care about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
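/* report the fd as readable whenever the notification queue is non-empty */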
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	event_size += roundup(event->name_len, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/* round up event->name_len so it is a multiple of event_size */
	name_len = roundup(event->name_len, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.  I get my zeros from the nul_inotify_event.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's from nul_inotify_event */
		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}

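/*
 * Copy queued events into the user buffer until the next event no longer
 * fits; block (unless O_NONBLOCK) while the queue is empty and nothing
 * has been copied yet.
 */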
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

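/* enable/disable SIGIO delivery on the inotify fd (fcntl FASYNC) */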
static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

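	/* tear down every watch (mark) that belongs to this group */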
	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
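		/* return the number of bytes a read() on this fd would copy right now */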
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			send_len += roundup(event->name_len,
					     sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

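/*
 * Remove the watch descriptor from the group's idr so the mark can no
 * longer be looked up by wd.
 */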
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr;

	spin_lock(&group->inotify_data.idr_lock);
	idr = &group->inotify_data.idr;
	idr_remove(idr, ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);
	ientry->wd = -1;
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
 * internal reference held on the mark because it is in the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);

	/* did the private data get added? */
	if (list_empty(&fsn_event_priv->event_list))
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	/* removed from idr, drop that reference */
	fsnotify_put_mark(entry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	struct fsnotify_mark_entry *entry = NULL;
	struct inotify_inode_mark_entry *ientry;
	struct inotify_inode_mark_entry *tmp_ientry;
	int ret = 0;
	int add = (arg & IN_MASK_ADD);
	__u32 mask;
	__u32 old_mask, new_mask;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;
	/* we set the mask at the end after attaching it */
	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->wd = -1;

find_entry:
	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (entry) {
		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	} else {
		ret = -ENOSPC;
		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
			goto out_err;
retry:
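		/* preallocate idr memory; idr_get_new_above() may still return
		 * -EAGAIN if that preallocation was consumed, so just retry */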
		ret = -ENOMEM;
		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
			goto out_err;

		spin_lock(&group->inotify_data.idr_lock);
		ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
					group->inotify_data.last_wd,
					&tmp_ientry->wd);
		spin_unlock(&group->inotify_data.idr_lock);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			goto out_err;
		}

		ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
		if (ret) {
			inotify_remove_from_idr(group, tmp_ientry);
			if (ret == -EEXIST)
				goto find_entry;
			goto out_err;
		}

		/* tmp_ientry has been added to the inode, so we are all set up.
		 * now we just need to make sure tmp_ientry doesn't get freed and
		 * we need to set up entry and ientry so the generic code can
		 * do its thing. */
		ientry = tmp_ientry;
		entry = &ientry->fsn_entry;
		tmp_ientry = NULL;

		atomic_inc(&group->inotify_data.user->inotify_watches);

		/* update the idr hint */
		group->inotify_data.last_wd = ientry->wd;

		/* we put the mark on the idr, take a reference */
		fsnotify_get_mark(entry);
	}

	ret = ientry->wd;

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* this either matches fsnotify_find_mark_entry, or init_mark_entry
	 * depending on which path we took... */
	fsnotify_put_mark(entry);

out_err:
	/* could be an error, could be that we found an existing mark */
	if (tmp_ientry) {
		/* on the idr but didn't make it on the inode */
		if (tmp_ientry->wd != -1)
			inotify_remove_from_idr(group, tmp_ientry);
		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
	}

	return ret;
}

static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;
	unsigned int grp_num;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}


/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
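	/* back the new fd with the internal inotify pseudo-filesystem mount */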
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = group;

	atomic_inc(&user->inotify_devs);

	fd_install(fd, filp);

	return fd;

out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	if (unlikely(ret))
		goto path_put_and_out;

path_put_and_out:
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct fsnotify_mark_entry *entry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	group = filp->private_data;

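	/* find the mark for this wd and take a reference before destroying it */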
	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry)) {
		spin_unlock(&group->inotify_data.idr_lock);
		ret = -EINVAL;
		goto out;
	}
	fsnotify_get_mark(entry);
	spin_unlock(&group->inotify_data.idr_lock);

	fsnotify_destroy_mark_by_entry(entry);
	fsnotify_put_mark(entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}

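/* pseudo-filesystem that backs the files returned by inotify_init() */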
static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
    .name	= "inotifyfs",
    .get_sb	= inotify_get_sb,
    .kill_sb	= kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

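	/* default limits, adjustable via /proc/sys/fs/inotify/ */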
	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);