#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}

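/*
 * Open a new struct file on the event's path for the listening process and
 * reserve a file descriptor for it.  Returns the reserved descriptor (the
 * caller installs it) or a negative errno if the object could not be opened.
 */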
static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can
	 * read even if the file was originally opened O_WRONLY.
	 */
	/*
	 * It's possible this event was an overflow event.  In that case
	 * dentry and mnt are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen when, say, tasks are gone and we try to
		 * open their /proc files, or we try to open a WRONLY file
		 * like in sysfs; we just send the errno to userspace since
		 * there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

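/*
 * Translate an internal fsnotify event into the fixed-size
 * struct fanotify_event_metadata that userspace reads.  For overflow events
 * the fd field is set to FAN_NOFD; otherwise a new descriptor is created
 * via create_fd().
 */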
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

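/*
 * Permission events (FAN_OPEN_PERM/FAN_ACCESS_PERM) stay queued on
 * access_list after being read until userspace writes a struct
 * fanotify_response verdict back to the fanotify fd.
 */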
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->fanotify_data.access_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->fanotify_data.access_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid; if invalid we do nothing and
	 * either userspace can send a valid response or we will clean it
	 * up after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif

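/*
 * Copy a single event to the userspace buffer, creating the file descriptor
 * that accompanies it.  On success the descriptor is installed and the number
 * of bytes copied is returned; on failure the descriptor and file are
 * released again.
 */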
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

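/*
 * Drain queued events into the user buffer, blocking (unless O_NONBLOCK)
 * until at least one event has been copied.  Permission events are parked
 * on access_list to await a userspace response rather than being destroyed.
 */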
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;
			schedule();
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		/*
		 * Permission events get queued to wait for response.  Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret < 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
				break;
			}
			spin_lock(&group->fanotify_data.access_lock);
			list_add_tail(&kevent->list,
				      &group->fanotify_data.access_list);
			spin_unlock(&group->fanotify_data.access_lock);
#endif
		}
		buf += ret;
		count -= ret;
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

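/*
 * Writes to a fanotify fd carry permission-event verdicts: userspace sends
 * a struct fanotify_response naming the event's fd and FAN_ALLOW/FAN_DENY.
 */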
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

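/*
 * On final close of the fanotify fd, allow any still-pending permission
 * events so their triggering tasks are not left blocked, then tear down
 * the group.
 */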
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;

	spin_lock(&group->fanotify_data.access_lock);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}
	spin_unlock(&group->fanotify_data.access_lock);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

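/*
 * FIONREAD reports how many bytes a read() would currently return, i.e.
 * one FAN_EVENT_METADATA_LEN per queued event.
 */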
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

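/*
 * Resolve the dfd/filename pair given to fanotify_mark() into a struct path,
 * taking a reference on it.  A NULL filename means "mark dfd itself".
 */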
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

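/*
 * Clear bits from a mark's event mask, or from its ignored mask if
 * FAN_MARK_IGNORED_MASK is set.  *destroy is set when the affected mask
 * would be left empty; the return value is the set of bits actually
 * removed.
 */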
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}

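/*
 * Remove events from this group's mark on a mount, destroying the mark if
 * its masks become empty and recalculating the mount-wide event mask when
 * needed.
 */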
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
511 512
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
513 514 515
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
516
	int destroy_mark;
517

518
	mutex_lock(&group->mark_mutex);
519
	fsn_mark = fsnotify_find_inode_mark(group, inode);
520 521
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
522
		return -ENOENT;
523
	}
524

525 526 527
	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
528 529 530
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

531
	/* matches the fsnotify_find_inode_mark() */
532
	fsnotify_put_mark(fsn_mark);
533 534
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
535

536 537 538
	return 0;
}

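/*
 * Set bits in a mark's event mask, or in its ignored mask if
 * FAN_MARK_IGNORED_MASK is given.  Without FAN_MARK_ONDIR, FAN_ONDIR is
 * added to the ignored mask so that events on directories are suppressed.
 * Returns the bits that were newly added.
 */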
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

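/*
 * Allocate a new mark and attach it to the inode or mount, enforcing the
 * group's max_marks limit.  Called with mark_mutex held.
 */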
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}


static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

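/*
 * Add events to this group's mark on an inode, creating the mark on first
 * use and recalculating the inode's event mask if new bits were added.
 */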
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	spin_lock_init(&group->fanotify_data.access_lock);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

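/*
 * A minimal userspace usage sketch (illustrative only, not part of this
 * file's build; error handling is omitted, and the fanotify_init()/
 * fanotify_mark() wrappers are assumed to come from the C library's
 * <sys/fanotify.h>):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/fanotify.h>
 *
 *	int fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF,
 *				   O_RDONLY | O_LARGEFILE);
 *	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/home");
 *
 *	struct fanotify_event_metadata buf[16];
 *	ssize_t len = read(fan_fd, buf, sizeof(buf));
 *
 * Each returned metadata record carries an open descriptor (metadata->fd)
 * for the file the event occurred on; the listener must close() it when
 * done.
 */
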
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);