/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
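
/*
 * These knobs are exposed under /proc/sys/fs/inotify/ as
 * max_user_instances, max_user_watches and max_queued_events.  As a
 * usage sketch (the value below is only an example), an administrator
 * can raise the per-user watch limit with:
 *
 *	echo 65536 > /proc/sys/fs/inotify/max_user_watches
 */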

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * every watch should accept its own ignored event, care about its
	 * children, and receive events when the inode is unmounted
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
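
/*
 * A worked example of the two helpers above (a sketch, using only flags
 * already handled here): a watch requested with arg = IN_CREATE | IN_DELETE
 * gets the kernel-side mask
 *
 *	FS_CREATE | FS_DELETE | FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT
 *
 * and an event carrying FS_CREATE | FS_ISDIR is reported back to userspace
 * by inotify_mask_to_arg() as IN_CREATE | IN_ISDIR.
 */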

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
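
/*
 * Worked example of the rounding above (a sketch): for a name such as
 * "a.txt", name_len is 5 and, with sizeof(struct inotify_event) being 16
 * bytes, the name is padded out to roundup(5 + 1, 16) = 16 bytes, so the
 * whole record later copied to userspace occupies 16 + 16 = 32 bytes.
 */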

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if it
 * would not fit.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
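
/*
 * For reference, a minimal userspace sketch of how the records produced by
 * copy_event_to_user() are parsed (assumes <sys/inotify.h>, <stdio.h> and
 * <unistd.h>; error handling omitted).  Each record is a struct
 * inotify_event followed by ->len bytes of NUL-padded name:
 *
 *	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *
 *		printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(struct inotify_event) + ev->len;
 *	}
 */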

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching the get from inotify_init()->inotify_new_group() */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
							 int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr?  we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref counting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
			     NULL, FSNOTIFY_EVENT_NONE, NULL, 0, NULL);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode,
				       NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}


	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* inotify_new_group() took a reference to the group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
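
/*
 * Putting the three syscalls above together, a minimal userspace sketch
 * (glibc wrappers from <sys/inotify.h>; the path and mask are arbitrary
 * examples, error handling omitted):
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *
 *	... read(fd, ...) and parse records as sketched above copy_event_to_user() ...
 *
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */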

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);