#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define	FANOTIFY_INIT_ALL_EVENT_F_BITS				( \
		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
		__O_SYNC	| O_DSYNC	| O_CLOEXEC     | \
		O_LARGEFILE	| O_NOATIME	)
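
/*
 * Illustrative userspace sketch (not part of this file): event_f_flags
 * handed to fanotify_init() must stay within the mask above; internal
 * flags such as FMODE_NONOTIFY are applied by the kernel itself in
 * create_fd().
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF,
 *			       O_RDONLY | O_LARGEFILE);
 */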

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}

static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can
	 * read even if the file was originally opened O_WRONLY.
	 */
	/*
	 * It's possible this event was an overflow event; in that case
	 * dentry and mnt are NULL. That's fine, just don't call dentry_open.
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen when, say, tasks are gone and we try to
		 * open their /proc files, or we try to open a WRONLY file
		 * like in sysfs; we just send the errno to userspace since
		 * there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->fanotify_data.access_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->fanotify_data.access_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid. If it is invalid we do nothing;
	 * either userspace can send a valid response or we will clean it
	 * up after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif
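
/*
 * Illustrative userspace sketch (not part of this file): a daemon that
 * received a permission event answers it by writing a struct
 * fanotify_response back to the fanotify fd; that write is handled by
 * process_access_response() above.
 *
 *	struct fanotify_response resp = {
 *		.fd = metadata->fd,	// fd from the event just read
 *		.response = FAN_ALLOW,	// or FAN_DENY
 *	};
 *	write(fanotify_fd, &resp, sizeof(resp));
 */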

static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
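
/*
 * Illustrative userspace sketch (not part of this file): the notification
 * queue can be waited on with poll(2); POLLIN is reported as soon as at
 * least one event is queued.
 *
 *	struct pollfd pfd = { .fd = fanotify_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 */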

static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;
			schedule();
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		/*
		 * Permission events get queued to wait for response.  Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret < 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
				break;
			}
			spin_lock(&group->fanotify_data.access_lock);
			list_add_tail(&kevent->list,
				      &group->fanotify_data.access_list);
			spin_unlock(&group->fanotify_data.access_lock);
#endif
		}
		buf += ret;
		count -= ret;
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
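
/*
 * Illustrative userspace sketch (not part of this file) of the read side
 * implemented above: a listener reads a batch of events and walks the
 * buffer with the FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from
 * <linux/fanotify.h>.
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md;
 *
 *	for (md = (struct fanotify_event_metadata *)buf;
 *	     FAN_EVENT_OK(md, len);
 *	     md = FAN_EVENT_NEXT(md, len)) {
 *		// inspect md->mask, md->fd, md->pid; close(md->fd) when done
 *	}
 */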

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;

	spin_lock(&group->fanotify_data.access_lock);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}
	spin_unlock(&group->fanotify_data.access_lock);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
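
/*
 * Illustrative userspace sketch (not part of this file): FIONREAD reports
 * how many bytes of event metadata are ready, so a listener can size its
 * read buffer up front.
 *
 *	int avail = 0;
 *	ioctl(fanotify_fd, FIONREAD, &avail);
 */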

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}


static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	spin_lock_init(&group->fanotify_data.access_lock);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
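
/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * listener marks a whole mount and asks for permission events, which
 * requires a group of class FAN_CLASS_CONTENT or higher (see the
 * FS_PRIO_0 check above). "/home" is just a placeholder path.
 *
 *	int fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY | O_LARGEFILE);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN_PERM | FAN_CLOSE_WRITE, AT_FDCWD, "/home");
 */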

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);