/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/srcu.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

/*
 * Clear all of the marks on an inode when it is being evicted from core.
 * Called by the VFS with no fsnotify locks held; all teardown work is
 * delegated to fsnotify_clear_marks_by_inode().
 */
void __fsnotify_inode_delete(struct inode *inode)
{
	fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);

/*
 * Clear all of the marks on a vfsmount when it is being destroyed —
 * the mount-side counterpart of __fsnotify_inode_delete().
 */
void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}

/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @sb: superblock being unmounted.
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block.
 */
51
static void fsnotify_unmount_inodes(struct super_block *sb)
J
Jan Kara 已提交
52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
{
	struct inode *inode, *iput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		/*
		 * If i_count is zero, the inode cannot have any watches and
70
		 * doing an __iget/iput with SB_ACTIVE clear would actually
J
Jan Kara 已提交
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		if (iput_inode)
			iput(iput_inode);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput_inode = inode;

		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);

	if (iput_inode)
		iput(iput_inode);
}

/*
 * A superblock is going away: first handle any still-watched inodes
 * (fsnotify_unmount_inodes), then clear the marks attached to the
 * superblock itself.
 */
void fsnotify_sb_delete(struct super_block *sb)
{
	fsnotify_unmount_inodes(sb);
	fsnotify_clear_marks_by_sb(sb);
}

/*
 * Given an inode, first check if we care what happens to our children.  Inotify
 * and dnotify both tell their parents about events.  If we care about any event
 * on a child we run all of our children and set a dentry flag saying that the
 * parent cares.  Thus when an event happens on a child it can quickly tell if
 * if there is a need to find a parent and send the event to the parent.
 */
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
	struct dentry *alias;
	int watched;

	if (!S_ISDIR(inode->i_mode))
		return;

	/* determine if the children should tell inode about their events */
	watched = fsnotify_inode_watches_children(inode);

125
	spin_lock(&inode->i_lock);
E
Eric Paris 已提交
126 127
	/* run all of the dentries associated with this inode.  Since this is a
	 * directory, there damn well better only be one item on this list */
128
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
E
Eric Paris 已提交
129 130 131 132 133
		struct dentry *child;

		/* run all of the children of the original inode and fix their
		 * d_flags to indicate parental interest (their parent is the
		 * original inode) */
N
Nick Piggin 已提交
134
		spin_lock(&alias->d_lock);
135
		list_for_each_entry(child, &alias->d_subdirs, d_child) {
E
Eric Paris 已提交
136 137 138
			if (!child->d_inode)
				continue;

N
Nick Piggin 已提交
139
			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
E
Eric Paris 已提交
140 141 142 143 144 145
			if (watched)
				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
N
Nick Piggin 已提交
146
		spin_unlock(&alias->d_lock);
E
Eric Paris 已提交
147
	}
148
	spin_unlock(&inode->i_lock);
E
Eric Paris 已提交
149 150 151
}

/* Notify this dentry's parent about a child's events. */
A
Al Viro 已提交
152
int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
E
Eric Paris 已提交
153 154 155
{
	struct dentry *parent;
	struct inode *p_inode;
156
	int ret = 0;
E
Eric Paris 已提交
157

158
	if (!dentry)
159
		dentry = path->dentry;
160

E
Eric Paris 已提交
161
	if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
162
		return 0;
E
Eric Paris 已提交
163

C
Christoph Hellwig 已提交
164
	parent = dget_parent(dentry);
E
Eric Paris 已提交
165 166
	p_inode = parent->d_inode;

C
Christoph Hellwig 已提交
167 168 169
	if (unlikely(!fsnotify_inode_watches_children(p_inode)))
		__fsnotify_update_child_dentry_flags(p_inode);
	else if (p_inode->i_fsnotify_mask & mask) {
A
Al Viro 已提交
170 171
		struct name_snapshot name;

E
Eric Paris 已提交
172 173 174 175
		/* we are notifying a parent so come up with the new mask which
		 * specifies these are events which came from a child. */
		mask |= FS_EVENT_ON_CHILD;

A
Al Viro 已提交
176
		take_dentry_name_snapshot(&name, dentry);
177
		if (path)
178
			ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
A
Al Viro 已提交
179
				       name.name, 0);
180
		else
181
			ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
A
Al Viro 已提交
182 183
				       name.name, 0);
		release_dentry_name_snapshot(&name);
E
Eric Paris 已提交
184 185
	}

C
Christoph Hellwig 已提交
186
	dput(parent);
187 188

	return ret;
E
Eric Paris 已提交
189 190 191
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);

192
static int send_to_group(struct inode *to_tell,
A
Al Viro 已提交
193
			 __u32 mask, const void *data,
194
			 int data_is, u32 cookie,
195 196
			 const unsigned char *file_name,
			 struct fsnotify_iter_info *iter_info)
197
{
198
	struct fsnotify_group *group = NULL;
199
	__u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
200 201
	__u32 marks_mask = 0;
	__u32 marks_ignored_mask = 0;
202 203
	struct fsnotify_mark *mark;
	int type;
204

205
	if (WARN_ON(!iter_info->report_mask))
206
		return 0;
207 208 209

	/* clear ignored on inode modification */
	if (mask & FS_MODIFY) {
210 211 212 213 214 215 216 217
		fsnotify_foreach_obj_type(type) {
			if (!fsnotify_iter_should_report_type(iter_info, type))
				continue;
			mark = iter_info->marks[type];
			if (mark &&
			    !(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
				mark->ignored_mask = 0;
		}
218
	}
219

220 221 222 223 224 225 226 227 228 229
	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/* does the object mark tell us to do something? */
		if (mark) {
			group = mark->group;
			marks_mask |= mark->mask;
			marks_ignored_mask |= mark->ignored_mask;
		}
230 231
	}

232
	pr_debug("%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x"
233
		 " data=%p data_is=%d cookie=%d\n",
234 235
		 __func__, group, to_tell, mask, marks_mask, marks_ignored_mask,
		 data, data_is, cookie);
236

237
	if (!(test_mask & marks_mask & ~marks_ignored_mask))
238 239
		return 0;

240
	return group->ops->handle_event(group, to_tell, mask, data, data_is,
241
					file_name, cookie, iter_info);
242 243
}

/*
 * Return the first mark hanging off *connp, or NULL if the connector is
 * absent or its list is empty.  Both the connector pointer and the list
 * head are read with srcu_dereference(), so the caller must hold the
 * fsnotify_mark_srcu read lock.
 */
static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp)
{
	struct fsnotify_mark_connector *conn;
	struct hlist_node *node = NULL;

	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (conn)
		node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu);

	/* hlist_entry_safe() maps a NULL node to a NULL mark */
	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
}

/*
 * Return the mark following @mark on its object list, or NULL at the end
 * (or when @mark itself is NULL).  Uses srcu_dereference(), so the caller
 * must hold the fsnotify_mark_srcu read lock.
 */
static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
{
	struct hlist_node *node = NULL;

	if (mark)
		node = srcu_dereference(mark->obj_list.next,
					&fsnotify_mark_srcu);

	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
}

267 268 269 270 271 272 273 274 275
/*
 * iter_info is a multi head priority queue of marks.
 * Pick a subset of marks from queue heads, all with the
 * same group and set the report_mask for selected subset.
 * Returns the report_mask of the selected subset.
 */
static unsigned int fsnotify_iter_select_report_types(
		struct fsnotify_iter_info *iter_info)
{
276 277 278 279 280 281 282 283 284 285 286
	struct fsnotify_group *max_prio_group = NULL;
	struct fsnotify_mark *mark;
	int type;

	/* Choose max prio group among groups of all queue heads */
	fsnotify_foreach_obj_type(type) {
		mark = iter_info->marks[type];
		if (mark &&
		    fsnotify_compare_groups(max_prio_group, mark->group) > 0)
			max_prio_group = mark->group;
	}
287

288
	if (!max_prio_group)
289 290
		return 0;

291
	/* Set the report mask for marks from same group as max prio group */
292
	iter_info->report_mask = 0;
293 294 295 296 297 298
	fsnotify_foreach_obj_type(type) {
		mark = iter_info->marks[type];
		if (mark &&
		    fsnotify_compare_groups(max_prio_group, mark->group) == 0)
			fsnotify_iter_set_report_type(iter_info, type);
	}
299 300 301 302 303 304 305 306 307 308

	return iter_info->report_mask;
}

/*
 * Pop from iter_info multi head queue, the marks that were iterated in the
 * current iteration step.
 */
static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info)
{
309
	int type;
310

311 312 313 314 315
	fsnotify_foreach_obj_type(type) {
		if (fsnotify_iter_should_report_type(iter_info, type))
			iter_info->marks[type] =
				fsnotify_next_mark(iter_info->marks[type]);
	}
316 317
}

318 319 320 321 322 323
/*
 * This is the main call to fsnotify.  The VFS calls into hook specific functions
 * in linux/fsnotify.h.  Those functions then in turn call here.  Here will call
 * out to all of the registered fsnotify_group.  Those groups can then use the
 * notification event in whatever means they feel necessary.
 */
A
Al Viro 已提交
324
int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
325
	     const unsigned char *file_name, u32 cookie)
326
{
M
Miklos Szeredi 已提交
327
	struct fsnotify_iter_info iter_info = {};
328 329 330
	struct super_block *sb = NULL;
	struct mount *mnt = NULL;
	__u32 mnt_or_sb_mask = 0;
331
	int ret = 0;
332
	__u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
333

334
	if (data_is == FSNOTIFY_EVENT_PATH) {
A
Al Viro 已提交
335
		mnt = real_mount(((const struct path *)data)->mnt);
336 337 338
		sb = mnt->mnt.mnt_sb;
		mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask;
	}
339

340 341 342 343 344 345 346
	/*
	 * Optimization: srcu_read_lock() has a memory barrier which can
	 * be expensive.  It protects walking the *_fsnotify_marks lists.
	 * However, if we do not walk the lists, we do not have to do
	 * SRCU because we have no references to any objects and do not
	 * need SRCU to keep them "alive".
	 */
347
	if (!to_tell->i_fsnotify_marks &&
348
	    (!mnt || (!mnt->mnt_fsnotify_marks && !sb->s_fsnotify_marks)))
349
		return 0;
350 351
	/*
	 * if this is a modify event we may need to clear the ignored masks
352
	 * otherwise return if neither the inode nor the vfsmount/sb care about
353 354 355
	 * this type of event.
	 */
	if (!(mask & FS_MODIFY) &&
356
	    !(test_mask & (to_tell->i_fsnotify_mask | mnt_or_sb_mask)))
357
		return 0;
358

359
	iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
360

361 362 363
	iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
		fsnotify_first_mark(&to_tell->i_fsnotify_marks);
	if (mnt) {
364
		iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
M
Miklos Szeredi 已提交
365
			fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
366 367
		iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] =
			fsnotify_first_mark(&sb->s_fsnotify_marks);
368
	}
369

370
	/*
371 372
	 * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark
	 * ignore masks are properly reflected for mount/sb mark notifications.
373 374
	 * That's why this traversal is so complicated...
	 */
375
	while (fsnotify_iter_select_report_types(&iter_info)) {
376 377
		ret = send_to_group(to_tell, mask, data, data_is, cookie,
				    file_name, &iter_info);
378

379 380 381
		if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
			goto out;

382
		fsnotify_iter_next(&iter_info);
383
	}
384 385
	ret = 0;
out:
386
	srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx);
387

388
	return ret;
389 390 391
}
EXPORT_SYMBOL_GPL(fsnotify);

extern struct kmem_cache *fsnotify_mark_connector_cachep;

394 395
static __init int fsnotify_init(void)
{
396 397
	int ret;

398
	BUG_ON(hweight32(ALL_FSNOTIFY_BITS) != 23);
399

400 401 402 403
	ret = init_srcu_struct(&fsnotify_mark_srcu);
	if (ret)
		panic("initializing fsnotify_mark_srcu");

404 405 406
	fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector,
						    SLAB_PANIC);

407
	return 0;
408
}
409
core_initcall(fsnotify_init);