fsnotify.c
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/srcu.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

/*
 * Clear all of the marks on an inode when it is being evicted from core
 */
void __fsnotify_inode_delete(struct inode *inode)
{
	fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);

void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}

/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @sb: superblock being unmounted.
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block.
 */
void fsnotify_unmount_inodes(struct super_block *sb)
{
	struct inode *inode, *iput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with SB_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

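		/* iput() the inode taken in the previous iteration now that
		 * s_inode_list_lock has been dropped; iput() may block. */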
		if (iput_inode)
			iput(iput_inode);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput_inode = inode;

		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);

	if (iput_inode)
		iput(iput_inode);
}

/*
 * Given an inode, first check if we care what happens to our children.  Inotify
 * and dnotify both tell their parents about events.  If we care about any event
 * on a child we run all of our children and set a dentry flag saying that the
 * parent cares.  Thus when an event happens on a child it can quickly tell
 * if there is a need to find a parent and send the event to the parent.
 */
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
	struct dentry *alias;
	int watched;

	if (!S_ISDIR(inode->i_mode))
		return;

	/* determine if the children should tell inode about their events */
	watched = fsnotify_inode_watches_children(inode);

	spin_lock(&inode->i_lock);
	/* run all of the dentries associated with this inode.  Since this is a
	 * directory, there damn well better only be one item on this list */
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		struct dentry *child;

		/* run all of the children of the original inode and fix their
		 * d_flags to indicate parental interest (their parent is the
		 * original inode) */
		spin_lock(&alias->d_lock);
		list_for_each_entry(child, &alias->d_subdirs, d_child) {
			if (!child->d_inode)
				continue;

			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
			if (watched)
				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
		spin_unlock(&alias->d_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Notify this dentry's parent about a child's events. */
int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
{
	struct dentry *parent;
	struct inode *p_inode;
	int ret = 0;

	if (!dentry)
		dentry = path->dentry;

	if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
		return 0;

	parent = dget_parent(dentry);
	p_inode = parent->d_inode;

	if (unlikely(!fsnotify_inode_watches_children(p_inode)))
		__fsnotify_update_child_dentry_flags(p_inode);
	else if (p_inode->i_fsnotify_mask & mask) {
		struct name_snapshot name;

		/* we are notifying a parent so come up with the new mask which
		 * specifies these are events which came from a child. */
		mask |= FS_EVENT_ON_CHILD;

		take_dentry_name_snapshot(&name, dentry);
		if (path)
			ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
				       name.name, 0);
		else
			ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
				       name.name, 0);
		release_dentry_name_snapshot(&name);
	}

	dput(parent);

	return ret;
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);

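/*
 * Deliver one event to the single group that owns all of the marks currently
 * selected in iter_info, or drop it when every selected mark ignores or is
 * not interested in this event.
 */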
static int send_to_group(struct inode *to_tell,
			 __u32 mask, const void *data,
			 int data_is, u32 cookie,
			 const unsigned char *file_name,
			 struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_group *group = NULL;
	__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
	__u32 marks_mask = 0;
	__u32 marks_ignored_mask = 0;
	struct fsnotify_mark *mark;
	int type;

	if (WARN_ON(!iter_info->report_mask))
		return 0;

	/* clear ignored on inode modification */
	if (mask & FS_MODIFY) {
		fsnotify_foreach_obj_type(type) {
			if (!fsnotify_iter_should_report_type(iter_info, type))
				continue;
			mark = iter_info->marks[type];
			if (mark &&
			    !(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
				mark->ignored_mask = 0;
		}
	}

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/* does the object mark tell us to do something? */
		if (mark) {
			group = mark->group;
			marks_mask |= mark->mask;
			marks_ignored_mask |= mark->ignored_mask;
		}
	}

	pr_debug("%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x"
		 " data=%p data_is=%d cookie=%d\n",
		 __func__, group, to_tell, mask, marks_mask, marks_ignored_mask,
		 data, data_is, cookie);

	if (!(test_mask & marks_mask & ~marks_ignored_mask))
		return 0;

	return group->ops->handle_event(group, to_tell, mask, data, data_is,
					file_name, cookie, iter_info);
}

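/*
 * Helpers for walking an object's mark list under fsnotify_mark_srcu; both
 * return NULL when there is no (further) mark.
 */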
static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp)
{
	struct fsnotify_mark_connector *conn;
	struct hlist_node *node = NULL;

	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (conn)
		node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu);

	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
}

static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
{
	struct hlist_node *node = NULL;

	if (mark)
		node = srcu_dereference(mark->obj_list.next,
					&fsnotify_mark_srcu);

	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
}

/*
 * iter_info is a multi head priority queue of marks.
 * Pick a subset of marks from the queue heads, all owned by the
 * same group, and set the report_mask for the selected subset.
 * Returns the report_mask of the selected subset.
 */
static unsigned int fsnotify_iter_select_report_types(
		struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_group *max_prio_group = NULL;
	struct fsnotify_mark *mark;
	int type;

	/* Choose max prio group among groups of all queue heads */
	fsnotify_foreach_obj_type(type) {
		mark = iter_info->marks[type];
		if (mark &&
		    fsnotify_compare_groups(max_prio_group, mark->group) > 0)
			max_prio_group = mark->group;
	}

	if (!max_prio_group)
		return 0;

	/* Set the report mask for marks from same group as max prio group */
	iter_info->report_mask = 0;
	fsnotify_foreach_obj_type(type) {
		mark = iter_info->marks[type];
		if (mark &&
		    fsnotify_compare_groups(max_prio_group, mark->group) == 0)
			fsnotify_iter_set_report_type(iter_info, type);
	}

	return iter_info->report_mask;
}

/*
 * Pop from the iter_info multi head queue the marks that were iterated in the
 * current iteration step.
 */
static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info)
{
	int type;

	fsnotify_foreach_obj_type(type) {
		if (fsnotify_iter_should_report_type(iter_info, type))
			iter_info->marks[type] =
				fsnotify_next_mark(iter_info->marks[type]);
	}
}
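/*
 * Example (a sketch): if the inode has a mark owned by a high priority group
 * (say a fanotify permission group) and the mount has a mark owned by a lower
 * priority group, the first pass selects and reports only the inode mark;
 * fsnotify_iter_next() then advances just that list, and a later pass picks
 * up the mount mark for the lower priority group.
 */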

/*
 * This is the main call to fsnotify.  The VFS calls into hook specific functions
 * in linux/fsnotify.h.  Those functions then in turn call here, and this
 * function calls out to all of the registered fsnotify_groups.  Those groups
 * can then use the notification event in whatever way they see fit.
 */
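/*
 * Example (a sketch, not code from this file): the fsnotify_create() hook in
 * include/linux/fsnotify.h ends up calling roughly
 *
 *	fsnotify(dir, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE,
 *		 dentry->d_name.name, 0);
 *
 * so every group watching the directory is told about the new child by name.
 */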
int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
	     const unsigned char *file_name, u32 cookie)
{
	struct fsnotify_iter_info iter_info = {};
	struct mount *mnt;
	int ret = 0;
	/* global tests shouldn't care about events on child only the specific event */
	__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);

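	/* Only path events carry a mount; other events can only match inode marks. */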
	if (data_is == FSNOTIFY_EVENT_PATH)
		mnt = real_mount(((const struct path *)data)->mnt);
	else
		mnt = NULL;

	/*
	 * Optimization: srcu_read_lock() has a memory barrier which can
	 * be expensive.  It protects walking the *_fsnotify_marks lists.
	 * However, if we do not walk the lists, we do not have to do
	 * SRCU because we have no references to any objects and do not
	 * need SRCU to keep them "alive".
	 */
	if (!to_tell->i_fsnotify_marks &&
	    (!mnt || !mnt->mnt_fsnotify_marks))
		return 0;
	/*
	 * if this is a modify event we may need to clear the ignored masks
	 * otherwise return if neither the inode nor the vfsmount cares about
	 * this type of event.
	 */
	if (!(mask & FS_MODIFY) &&
	    !(test_mask & to_tell->i_fsnotify_mask) &&
	    !(mnt && test_mask & mnt->mnt_fsnotify_mask))
		return 0;

	iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);

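	/* Seed the iterator with the heads of the inode and mount mark lists. */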
	if ((mask & FS_MODIFY) ||
	    (test_mask & to_tell->i_fsnotify_mask)) {
		iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
			fsnotify_first_mark(&to_tell->i_fsnotify_marks);
	}

	if (mnt && ((mask & FS_MODIFY) ||
		    (test_mask & mnt->mnt_fsnotify_mask))) {
		iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
			fsnotify_first_mark(&to_tell->i_fsnotify_marks);
		iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
			fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
	}

	/*
	 * We need to merge inode & vfsmount mark lists so that inode mark
	 * ignore masks are properly reflected for mount mark notifications.
	 * That's why this traversal is so complicated...
	 */
	while (fsnotify_iter_select_report_types(&iter_info)) {
		ret = send_to_group(to_tell, mask, data, data_is, cookie,
				    file_name, &iter_info);

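		/* A permission event stops at the first group that denies or errors. */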
		if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
			goto out;

		fsnotify_iter_next(&iter_info);
	}
	ret = 0;
out:
	srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx);

	return ret;
}
EXPORT_SYMBOL_GPL(fsnotify);

extern struct kmem_cache *fsnotify_mark_connector_cachep;

static __init int fsnotify_init(void)
{
	int ret;

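	/* Keep this count in sync with the FS_* event bits in fsnotify_backend.h. */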
	BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23);

	ret = init_srcu_struct(&fsnotify_mark_srcu);
	if (ret)
		panic("initializing fsnotify_mark_srcu");

	fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector,
						    SLAB_PANIC);

	return 0;
}
core_initcall(fsnotify_init);