group.c
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#include <asm/atomic.h>

/* protects writes to fsnotify_groups and fsnotify_mask */
static DEFINE_MUTEX(fsnotify_grp_mutex);
/* protects reads while running the fsnotify_groups list */
struct srcu_struct fsnotify_grp_srcu;
/* all groups registered to receive filesystem notifications */
LIST_HEAD(fsnotify_groups);
/* bitwise OR of all events (FS_*) interesting to some group on this system */
__u32 fsnotify_mask;

/*
 * When a new group registers or changes its set of interesting events,
 * this function updates fsnotify_mask to contain all interesting events.
 */
void fsnotify_recalc_global_mask(void)
{
	struct fsnotify_group *group;
	__u32 mask = 0;
	int idx;

	idx = srcu_read_lock(&fsnotify_grp_srcu);
	list_for_each_entry_rcu(group, &fsnotify_groups, group_list)
		mask |= group->mask;
	srcu_read_unlock(&fsnotify_grp_srcu, idx);
	fsnotify_mask = mask;
}
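
/*
 * Illustrative sketch (not part of the original file): fsnotify_mask exists so
 * that the per-event notification path can bail out cheaply when no group is
 * interested.  A caller would test an event's FS_* mask against it roughly as
 * below; the helper name here is hypothetical.
 */
static inline bool fsnotify_anyone_interested(__u32 event_mask)
{
	return (event_mask & fsnotify_mask) != 0;
}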

/*
 * Update the group->mask by running all of the marks associated with this
 * group and finding the bitwise | of all of the mark->mask.  If we change
 * the group->mask we need to update the global mask of events interesting
 * to the system.
 */
void fsnotify_recalc_group_mask(struct fsnotify_group *group)
{
	__u32 mask = 0;
	__u32 old_mask = group->mask;
	struct fsnotify_mark_entry *entry;

	spin_lock(&group->mark_lock);
	list_for_each_entry(entry, &group->mark_entries, g_list)
		mask |= entry->mask;
	spin_unlock(&group->mark_lock);

	group->mask = mask;

	if (old_mask != mask)
		fsnotify_recalc_global_mask();
}
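
/*
 * Illustrative sketch (not part of the original file): a backend that widens
 * what one of its marks listens for is expected to recalculate the group mask
 * afterwards.  Locking and the actual mark attachment are omitted;
 * "example_widen_mark" is hypothetical.
 */
static void example_widen_mark(struct fsnotify_group *group,
			       struct fsnotify_mark_entry *entry)
{
	entry->mask |= FS_MODIFY;		/* mark now also wants modify events */
	fsnotify_recalc_group_mask(group);	/* propagate to group and global masks */
}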

/*
 * Final freeing of a group
 */
void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
	/* clear the notification queue of all events */
	fsnotify_flush_notify(group);

	if (group->ops->free_group_priv)
		group->ops->free_group_priv(group);

	kfree(group);
}

/*
 * Trying to get rid of a group.  We need to first get rid of any outstanding
 * allocations and then free the group.  Remember that fsnotify_clear_marks_by_group
 * could miss marks that are being freed by the inode side, and those marks could
 * still hold a reference to this group (via group->num_marks).  If we get into
 * that situation, fsnotify_final_destroy_group() will be called when that final
 * mark is freed.
 */
static void fsnotify_destroy_group(struct fsnotify_group *group)
{
	/* clear all inode mark entries for this group */
	fsnotify_clear_marks_by_group(group);

	/* past the point of no return, matches the initial value of 1 */
	if (atomic_dec_and_test(&group->num_marks))
		fsnotify_final_destroy_group(group);
}

/*
 * Remove this group from the global list of groups that will get events.
 * This can be done even if there are still references and things still using
 * this group; it just stops the group from getting new events.
 */
static void __fsnotify_evict_group(struct fsnotify_group *group)
{
	BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));

	if (group->on_group_list)
		list_del_rcu(&group->group_list);
	group->on_group_list = 0;
}

/*
 * Called when a group is no longer interested in getting events.  This can be
 * used if a group is misbehaving or if for some reason a group should no longer
 * get any filesystem events.
 */
void fsnotify_evict_group(struct fsnotify_group *group)
{
	mutex_lock(&fsnotify_grp_mutex);
	__fsnotify_evict_group(group);
	mutex_unlock(&fsnotify_grp_mutex);
}

/*
 * Drop a reference to a group.  Free it if this was the last reference.
 */
void fsnotify_put_group(struct fsnotify_group *group)
{
	if (!atomic_dec_and_mutex_lock(&group->refcnt, &fsnotify_grp_mutex))
		return;

	/*
	 * OK, now we know that there are no other users *and* we hold the
	 * mutex, so no new references will appear.
	 */
	__fsnotify_evict_group(group);

	/*
	 * now it's off the list, so the only thing we might care about is
	 * srcu access....
	 */
	mutex_unlock(&fsnotify_grp_mutex);
	synchronize_srcu(&fsnotify_grp_srcu);

	/* and now it is really dead. _Nothing_ could be seeing it */
	fsnotify_recalc_global_mask();
	fsnotify_destroy_group(group);
}

/*
 * Allocates a new group, initializes it and adds it to the global group list.
 * We take a reference for the group returned.
 */
struct fsnotify_group *fsnotify_obtain_group(__u32 mask,
					     const struct fsnotify_ops *ops)
{
	struct fsnotify_group *group;

	/* very low use, simpler locking if we just always alloc */
	group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	atomic_set(&group->refcnt, 1);

	group->mask = mask;

	mutex_init(&group->notification_mutex);
	INIT_LIST_HEAD(&group->notification_list);
	init_waitqueue_head(&group->notification_waitq);
	group->max_events = UINT_MAX;

	spin_lock_init(&group->mark_lock);
	INIT_LIST_HEAD(&group->mark_entries);

	group->ops = ops;

	mutex_lock(&fsnotify_grp_mutex);

	/* add the new group to the global list of groups */
	list_add_rcu(&group->group_list, &fsnotify_groups);
	group->on_group_list = 1;
	/* being on the fsnotify_groups list holds one num_marks */
	atomic_inc(&group->num_marks);

	mutex_unlock(&fsnotify_grp_mutex);

	if (mask)
		fsnotify_recalc_global_mask();

	return group;
}
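
/*
 * Illustrative sketch (not part of the original file): how a backend might
 * obtain a group and later drop its reference.  "example_ops" and
 * "example_backend_setup_teardown" are hypothetical; a real backend fills in
 * its fsnotify_ops (free_group_priv is the only callback this file itself
 * invokes).  IS_ERR() comes from <linux/err.h>.
 */
static const struct fsnotify_ops example_ops = {
	/* .handle_event, .free_group_priv, ... supplied by a real backend */
};

static void example_backend_setup_teardown(void)
{
	struct fsnotify_group *group;

	/* ask to be told about modification events */
	group = fsnotify_obtain_group(FS_MODIFY, &example_ops);
	if (IS_ERR(group))
		return;

	/* ... add marks, consume events from group->notification_list ... */

	/* drop the initial reference; eviction and freeing happen from here */
	fsnotify_put_group(group);
}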