#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* we held the notification_mutex the whole time, so the queue
	 * cannot have changed since the emptiness check above */
	return fsnotify_remove_notify_event(group);
}

static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can
	 * read the file even if it was originally opened O_WRONLY.
	 */
	/*
	 * It's possible this event was an overflow event.  In that case
	 * dentry and mnt are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen, say, when tasks are gone and we try to
		 * open their /proc files, or when we try to open a WRONLY
		 * file like in sysfs.  We just send the errno to userspace,
		 * since there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid.  If it is invalid, we do nothing;
	 * either userspace can send a valid response or we will clean it up
	 * after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif
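
/*
 * For context, a userspace-side sketch (illustrative only, not part of this
 * file) of how a listener built against CONFIG_FANOTIFY_ACCESS_PERMISSIONS
 * answers a permission event: it writes a struct fanotify_response to the
 * fanotify fd, which fanotify_write() below feeds into
 * process_access_response().
 *
 *	struct fanotify_response response;
 *
 *	response.fd = metadata->fd;	// fd from the event just read
 *	response.response = FAN_ALLOW;	// or FAN_DENY
 *	write(fanotify_fd, &response, sizeof(response));
 */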

static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		struct fanotify_perm_event_info *pevent;

		pevent = FANOTIFY_PE(event);
		pevent->fd = fd;
		mutex_lock(&group->fanotify_data.access_mutex);
		list_add_tail(&pevent->fae.fse.list,
			      &group->fanotify_data.access_list);
		mutex_unlock(&group->fanotify_data.access_mutex);
	}
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		FANOTIFY_PE(event)->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			/*
			 * Permission events get queued to wait for response.
			 * Other events can be destroyed now.
			 */
			if (!(kevent->mask & FAN_ALL_PERM_EVENTS))
				fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
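
/*
 * A minimal userspace read loop (illustrative sketch, not kernel code) that
 * matches the layout fanotify_read() produces: a stream of variable-length
 * records, each starting with a struct fanotify_event_metadata whose
 * event_len field says where the next record begins.
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *
 *	while (FAN_EVENT_OK(md, len)) {
 *		if (md->fd >= 0)
 *			close(md->fd);	// non-overflow events carry an open fd
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 */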

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}


static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
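
/*
 * Illustrative userspace usage (a sketch, not part of this file): creating a
 * notification-class fanotify group whose per-event fds are opened read-only.
 *
 *	int fanotify_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF,
 *					O_RDONLY | O_LARGEFILE);
 *	if (fanotify_fd < 0)
 *		perror("fanotify_init");
 */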

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update/remove an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
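
/*
 * Illustrative userspace usage (a sketch; the mount point is a made-up
 * example): asking the group created above for open/close events on
 * everything under the mount containing "/home".
 *
 *	int ret = fanotify_mark(fanotify_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *				FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/home");
 *	if (ret < 0)
 *		perror("fanotify_mark");
 */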

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot
 * return an error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);