#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "fanotify.h"

static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event_info *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
	    old->path.mnt == new->path.mnt &&
	    old->path.dentry == new->path.dentry)
		return true;
	return false;
}

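/*
 * Try to coalesce @event with an event already queued on @list: on success
 * the older event absorbs the new mask bits and 1 is returned, otherwise 0
 * tells the caller to queue @event as a new entry.
 */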
/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	bool do_merge = false;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (event->mask & FAN_ALL_PERM_EVENTS)
		return 0;
#endif

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			do_merge = true;
			break;
		}
	}

	if (!do_merge)
		return 0;

	test_event->mask |= event->mask;
	return 1;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
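/*
 * Block until userspace writes a response for this permission event (or the
 * group is being torn down and bypass_perm is set), then translate the reply
 * into 0 (allow) or -EPERM (deny).
 */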
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event_info *event)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response ||
				atomic_read(&group->fanotify_data.bypass_perm));

	if (!event->response) /* bypass_perm set */
		return 0;

	/* userspace responded, convert to something usable */
	switch (event->response) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}
	event->response = 0;

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}
#endif

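/*
 * Decide whether an event is of interest to this group: it must carry path
 * data, refer to a regular file or directory, and match the combined mask of
 * the inode and/or vfsmount marks minus their ignored masks.
 */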
static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
				       struct fsnotify_mark *vfsmnt_mark,
				       u32 event_mask,
				       void *data, int data_type)
{
	__u32 marks_mask, marks_ignored_mask;
	struct path *path = data;

	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
		 " data_type=%d\n", __func__, inode_mark, vfsmnt_mark,
		 event_mask, data, data_type);

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!S_ISREG(path->dentry->d_inode->i_mode) &&
	    !S_ISDIR(path->dentry->d_inode->i_mode))
		return false;

	if (inode_mark && vfsmnt_mark) {
		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
	} else if (inode_mark) {
		/*
		 * if the event is for a child and this inode doesn't care about
		 * events on the child, don't send it!
		 */
		if ((event_mask & FS_EVENT_ON_CHILD) &&
		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
			return false;
		marks_mask = inode_mark->mask;
		marks_ignored_mask = inode_mark->ignored_mask;
	} else if (vfsmnt_mark) {
		marks_mask = vfsmnt_mark->mask;
		marks_ignored_mask = vfsmnt_mark->ignored_mask;
	} else {
		BUG();
	}

	if (S_ISDIR(path->dentry->d_inode->i_mode) &&
	    (marks_ignored_mask & FS_ISDIR))
		return false;

	if (event_mask & marks_mask & ~marks_ignored_mask)
		return true;

	return false;
}

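/*
 * Allocate an event from the right cache (permission events carry extra
 * response state), pin the task's tgid and take a reference on the path so
 * both stay valid until the event is freed.
 */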
struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
						 struct path *path)
{
	struct fanotify_event_info *event;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & FAN_ALL_PERM_EVENTS) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
					  GFP_KERNEL);
		if (!pevent)
			return NULL;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
#endif
	event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
	if (!event)
		return NULL;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
	return event;
}

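/*
 * Main fsnotify callback: filter the event, allocate and queue it for the
 * group, and for permission events wait for the userspace verdict before
 * returning it to the caller.
 */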
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 struct fsnotify_mark *inode_mark,
				 struct fsnotify_mark *fanotify_mark,
				 u32 mask, void *data, int data_type,
				 const unsigned char *file_name, u32 cookie)
{
	int ret = 0;
	struct fanotify_event_info *event;
	struct fsnotify_event *fsn_event;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

	if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data,
					data_type))
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	event = fanotify_alloc_event(inode, mask, data);
	if (unlikely(!event))
		return -ENOMEM;

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		return 0;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & FAN_ALL_PERM_EVENTS) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event));
		fsnotify_destroy_event(group, fsn_event);
	}
#endif
	return ret;
}

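/* Drop the per-group accounting against the owning user. */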
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

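/*
 * Release the references taken in fanotify_alloc_event() and return the
 * event to the cache it was allocated from.
 */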
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event_info *event;

	event = FANOTIFY_E(fsn_event);
	path_put(&event->path);
	put_pid(event->tgid);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (fsn_event->mask & FAN_ALL_PERM_EVENTS) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
#endif
	kmem_cache_free(fanotify_event_cachep, event);
}

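/* Callbacks the generic fsnotify core invokes for fanotify groups. */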
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
};