Commit 8c8946f5 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify

* 'for-linus' of git://git.infradead.org/users/eparis/notify: (132 commits)
  fanotify: use both marks when possible
  fsnotify: pass both the vfsmount mark and inode mark
  fsnotify: walk the inode and vfsmount lists simultaneously
  fsnotify: rework ignored mark flushing
  fsnotify: remove global fsnotify groups lists
  fsnotify: remove group->mask
  fsnotify: remove the global masks
  fsnotify: cleanup should_send_event
  fanotify: use the mark in handler functions
  audit: use the mark in handler functions
  dnotify: use the mark in handler functions
  inotify: use the mark in handler functions
  fsnotify: send fsnotify_mark to groups in event handling functions
  fsnotify: Exchange list heads instead of moving elements
  fsnotify: srcu to protect read side of inode and vfsmount locks
  fsnotify: use an explicit flag to indicate fsnotify_destroy_mark has been called
  fsnotify: use _rcu functions for mark list traversal
  fsnotify: place marks on object in order of group memory address
  vfs/fsnotify: fsnotify_close can delay the final work in fput
  fsnotify: store struct file not struct path
  ...

Fix up trivial delete/modify conflict in fs/notify/inotify/inotify.c.
@@ -360,14 +360,6 @@ When: 2.6.33
 Why:	Should be implemented in userspace, policy daemon.
 Who:	Johannes Berg <johannes@sipsolutions.net>
 
-----------------------------
-
-What:	CONFIG_INOTIFY
-When:	2.6.33
-Why:	last user (audit) will be converted to the newer more generic
-	and more easily maintained fsnotify subsystem
-Who:	Eric Paris <eparis@redhat.com>
-
 ----------------------------
 What:	sound-slot/service-* module aliases and related clutters in
......
@@ -842,4 +842,6 @@ ia32_sys_call_table:
     .quad compat_sys_rt_tgsigqueueinfo	/* 335 */
     .quad sys_perf_event_open
     .quad compat_sys_recvmmsg
+    .quad sys_fanotify_init
+    .quad sys32_fanotify_mark
 ia32_syscall_end:
@@ -546,3 +546,12 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
     return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
                          ((u64)len_hi << 32) | len_lo);
 }
+
+asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
+                                    u32 mask_lo, u32 mask_hi,
+                                    int fd, const char __user *pathname)
+{
+    return sys_fanotify_mark(fanotify_fd, flags,
+                             ((u64)mask_hi << 32) | mask_lo,
+                             fd, pathname);
+}
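For reference, the compat wrapper above works because splitting and rejoining the 64-bit mask is lossless. A standalone userspace sketch of the same arithmetic (hypothetical mask value, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mask = 0x100000001ULL;  /* hypothetical 64-bit event mask */

    /* what a 32-bit libc wrapper would pass in two registers */
    uint32_t mask_lo = (uint32_t)(mask & 0xffffffffULL);
    uint32_t mask_hi = (uint32_t)(mask >> 32);

    /* what sys32_fanotify_mark() reassembles on the kernel side */
    uint64_t rejoined = ((uint64_t)mask_hi << 32) | mask_lo;

    printf("lo=%#x hi=%#x rejoined=%#llx\n",
           mask_lo, mask_hi, (unsigned long long)rejoined);
    return mask != rejoined;  /* exits 0 when the round trip matches */
}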
@@ -80,4 +80,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *);
 /* ia32/ipc32.c */
 asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
 
+asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
+                                    const char __user *);
+
 #endif /* _ASM_X86_SYS_IA32_H */
@@ -343,10 +343,12 @@
 #define __NR_rt_tgsigqueueinfo	335
 #define __NR_perf_event_open	336
 #define __NR_recvmmsg		337
+#define __NR_fanotify_init	338
+#define __NR_fanotify_mark	339
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 338
+#define NR_syscalls 340
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
......
@@ -663,6 +663,10 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
 __SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 #define __NR_recvmmsg		299
 __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
+#define __NR_fanotify_init	300
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark	301
+__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
......
@@ -337,3 +337,5 @@ ENTRY(sys_call_table)
     .long sys_rt_tgsigqueueinfo	/* 335 */
     .long sys_perf_event_open
     .long sys_recvmmsg
+    .long sys_fanotify_init
+    .long sys_fanotify_mark
@@ -1193,11 +1193,10 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
     if (iov != iovstack)
         kfree(iov);
     if ((ret + (type == READ)) > 0) {
-        struct dentry *dentry = file->f_path.dentry;
         if (type == READ)
-            fsnotify_access(dentry);
+            fsnotify_access(file);
         else
-            fsnotify_modify(dentry);
+            fsnotify_modify(file);
     }
     return ret;
 }
......
@@ -128,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
     if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
         goto exit;
 
-    fsnotify_open(file->f_path.dentry);
+    fsnotify_open(file);
 
     error = -ENOEXEC;
     if(file->f_op) {
@@ -683,7 +683,7 @@ struct file *open_exec(const char *name)
     if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
         goto exit;
 
-    fsnotify_open(file->f_path.dentry);
+    fsnotify_open(file);
 
     err = deny_write_access(file);
     if (err)
......
@@ -230,6 +230,15 @@ static void __fput(struct file *file)
     might_sleep();
 
     fsnotify_close(file);
+
+    /*
+     * fsnotify_create_event may have taken one or more references on this
+     * file. If it did so it left one reference for us to drop to make sure
+     * its calls to fput could not prematurely destroy the file.
+     */
+    if (atomic_long_read(&file->f_count))
+        return fput(file);
+
     /*
      * The function eventpoll_release() should be the first called
      * in the file cleanup chain.
......
@@ -20,7 +20,6 @@
 #include <linux/pagemap.h>
 #include <linux/cdev.h>
 #include <linux/bootmem.h>
-#include <linux/inotify.h>
 #include <linux/fsnotify.h>
 #include <linux/mount.h>
 #include <linux/async.h>
@@ -264,12 +263,8 @@ void inode_init_once(struct inode *inode)
     INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
     INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
     i_size_ordered_init(inode);
-#ifdef CONFIG_INOTIFY
-    INIT_LIST_HEAD(&inode->inotify_watches);
-    mutex_init(&inode->inotify_mutex);
-#endif
 #ifdef CONFIG_FSNOTIFY
-    INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
+    INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
 #endif
 }
 EXPORT_SYMBOL(inode_init_once);
@@ -413,7 +408,6 @@ int invalidate_inodes(struct super_block *sb)
     down_write(&iprune_sem);
     spin_lock(&inode_lock);
-    inotify_unmount_inodes(&sb->s_inodes);
     fsnotify_unmount_inodes(&sb->s_inodes);
     busy = invalidate_list(&sb->s_inodes, &throw_away);
     spin_unlock(&inode_lock);
......
@@ -2633,7 +2633,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 {
     int error;
     int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
-    const char *old_name;
+    const unsigned char *old_name;
 
     if (old_dentry->d_inode == new_dentry->d_inode)
         return 0;
......
@@ -29,6 +29,7 @@
 #include <linux/log2.h>
 #include <linux/idr.h>
 #include <linux/fs_struct.h>
+#include <linux/fsnotify.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include "pnode.h"
@@ -150,6 +151,9 @@ struct vfsmount *alloc_vfsmnt(const char *name)
     INIT_LIST_HEAD(&mnt->mnt_share);
     INIT_LIST_HEAD(&mnt->mnt_slave_list);
     INIT_LIST_HEAD(&mnt->mnt_slave);
+#ifdef CONFIG_FSNOTIFY
+    INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
+#endif
 #ifdef CONFIG_SMP
     mnt->mnt_writers = alloc_percpu(int);
     if (!mnt->mnt_writers)
@@ -610,6 +614,7 @@ static inline void __mntput(struct vfsmount *mnt)
      * provides barriers, so count_mnt_writers() below is safe. AV
      */
     WARN_ON(count_mnt_writers(mnt));
+    fsnotify_vfsmount_delete(mnt);
     dput(mnt->mnt_root);
     free_vfsmnt(mnt);
     deactivate_super(sb);
......
@@ -934,7 +934,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
         nfsdstats.io_read += host_err;
         *count = host_err;
         err = 0;
-        fsnotify_access(file->f_path.dentry);
+        fsnotify_access(file);
     } else
         err = nfserrno(host_err);
 out:
@@ -1045,7 +1045,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
         goto out_nfserr;
     *cnt = host_err;
     nfsdstats.io_write += host_err;
-    fsnotify_modify(file->f_path.dentry);
+    fsnotify_modify(file);
 
     /* clear setuid/setgid flag after write */
     if (inode->i_mode & (S_ISUID | S_ISGID))
......
@@ -3,3 +3,4 @@ config FSNOTIFY
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
+source "fs/notify/fanotify/Kconfig"
......
-obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o
+obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o \
+                          mark.o vfsmount_mark.o
 
 obj-y += dnotify/
 obj-y += inotify/
+obj-y += fanotify/
@@ -29,17 +29,17 @@
 int dir_notify_enable __read_mostly = 1;
 
 static struct kmem_cache *dnotify_struct_cache __read_mostly;
-static struct kmem_cache *dnotify_mark_entry_cache __read_mostly;
+static struct kmem_cache *dnotify_mark_cache __read_mostly;
 static struct fsnotify_group *dnotify_group __read_mostly;
 static DEFINE_MUTEX(dnotify_mark_mutex);
 
 /*
- * dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which
+ * dnotify will attach one of these to each inode (i_fsnotify_marks) which
  * is being watched by dnotify. If multiple userspace applications are watching
  * the same directory with dnotify their information is chained in dn
  */
-struct dnotify_mark_entry {
-    struct fsnotify_mark_entry fsn_entry;
+struct dnotify_mark {
+    struct fsnotify_mark fsn_mark;
     struct dnotify_struct *dn;
 };
 
@@ -51,27 +51,27 @@ struct dnotify_mark_entry {
  * it calls the fsnotify function so it can update the set of all events relevant
  * to this inode.
  */
-static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
+static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
 {
     __u32 new_mask, old_mask;
     struct dnotify_struct *dn;
-    struct dnotify_mark_entry *dnentry = container_of(entry,
-                                                      struct dnotify_mark_entry,
-                                                      fsn_entry);
+    struct dnotify_mark *dn_mark = container_of(fsn_mark,
+                                                struct dnotify_mark,
+                                                fsn_mark);
 
-    assert_spin_locked(&entry->lock);
+    assert_spin_locked(&fsn_mark->lock);
 
-    old_mask = entry->mask;
+    old_mask = fsn_mark->mask;
     new_mask = 0;
-    for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next)
+    for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
         new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
-    entry->mask = new_mask;
+    fsnotify_set_mark_mask_locked(fsn_mark, new_mask);
 
     if (old_mask == new_mask)
         return;
 
-    if (entry->inode)
-        fsnotify_recalc_inode_mask(entry->inode);
+    if (fsn_mark->i.inode)
+        fsnotify_recalc_inode_mask(fsn_mark->i.inode);
 }
 
 /*
@@ -83,29 +83,25 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
  * events.
  */
 static int dnotify_handle_event(struct fsnotify_group *group,
+                                struct fsnotify_mark *inode_mark,
+                                struct fsnotify_mark *vfsmount_mark,
                                 struct fsnotify_event *event)
 {
-    struct fsnotify_mark_entry *entry = NULL;
-    struct dnotify_mark_entry *dnentry;
+    struct dnotify_mark *dn_mark;
     struct inode *to_tell;
     struct dnotify_struct *dn;
     struct dnotify_struct **prev;
     struct fown_struct *fown;
     __u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD;
 
-    to_tell = event->to_tell;
+    BUG_ON(vfsmount_mark);
 
-    spin_lock(&to_tell->i_lock);
-    entry = fsnotify_find_mark_entry(group, to_tell);
-    spin_unlock(&to_tell->i_lock);
+    to_tell = event->to_tell;
 
-    /* unlikely since we already passed dnotify_should_send_event() */
-    if (unlikely(!entry))
-        return 0;
-    dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+    dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark);
 
-    spin_lock(&entry->lock);
-    prev = &dnentry->dn;
+    spin_lock(&inode_mark->lock);
+    prev = &dn_mark->dn;
 
     while ((dn = *prev) != NULL) {
         if ((dn->dn_mask & test_mask) == 0) {
             prev = &dn->dn_next;
@@ -118,12 +114,11 @@ static int dnotify_handle_event(struct fsnotify_group *group,
         else {
             *prev = dn->dn_next;
             kmem_cache_free(dnotify_struct_cache, dn);
-            dnotify_recalc_inode_mask(entry);
+            dnotify_recalc_inode_mask(inode_mark);
         }
     }
 
-    spin_unlock(&entry->lock);
-    fsnotify_put_mark(entry);
+    spin_unlock(&inode_mark->lock);
 
     return 0;
 }
@@ -133,44 +128,27 @@ static int dnotify_handle_event(struct fsnotify_group *group,
  * userspace notification for that pair.
  */
 static bool dnotify_should_send_event(struct fsnotify_group *group,
-                                      struct inode *inode, __u32 mask)
+                                      struct inode *inode,
+                                      struct fsnotify_mark *inode_mark,
+                                      struct fsnotify_mark *vfsmount_mark,
+                                      __u32 mask, void *data, int data_type)
 {
-    struct fsnotify_mark_entry *entry;
-    bool send;
-
-    /* !dir_notify_enable should never get here, don't waste time checking
-    if (!dir_notify_enable)
-        return 0; */
-
     /* not a dir, dnotify doesn't care */
     if (!S_ISDIR(inode->i_mode))
         return false;
 
-    spin_lock(&inode->i_lock);
-    entry = fsnotify_find_mark_entry(group, inode);
-    spin_unlock(&inode->i_lock);
-
-    /* no mark means no dnotify watch */
-    if (!entry)
-        return false;
-
-    mask = (mask & ~FS_EVENT_ON_CHILD);
-    send = (mask & entry->mask);
-
-    fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry */
-
-    return send;
+    return true;
 }
 
-static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
+static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
 {
-    struct dnotify_mark_entry *dnentry = container_of(entry,
-                                                      struct dnotify_mark_entry,
-                                                      fsn_entry);
+    struct dnotify_mark *dn_mark = container_of(fsn_mark,
+                                                struct dnotify_mark,
+                                                fsn_mark);
 
-    BUG_ON(dnentry->dn);
+    BUG_ON(dn_mark->dn);
 
-    kmem_cache_free(dnotify_mark_entry_cache, dnentry);
+    kmem_cache_free(dnotify_mark_cache, dn_mark);
 }
 
 static struct fsnotify_ops dnotify_fsnotify_ops = {
@@ -183,15 +161,15 @@ static struct fsnotify_ops dnotify_fsnotify_ops = {
 /*
  * Called every time a file is closed. Looks first for a dnotify mark on the
- * inode. If one is found run all of the ->dn entries attached to that
+ * inode. If one is found run all of the ->dn structures attached to that
  * mark for one relevant to this process closing the file and remove that
  * dnotify_struct. If that was the last dnotify_struct also remove the
- * fsnotify_mark_entry.
+ * fsnotify_mark.
  */
 void dnotify_flush(struct file *filp, fl_owner_t id)
 {
-    struct fsnotify_mark_entry *entry;
-    struct dnotify_mark_entry *dnentry;
+    struct fsnotify_mark *fsn_mark;
+    struct dnotify_mark *dn_mark;
     struct dnotify_struct *dn;
     struct dnotify_struct **prev;
     struct inode *inode;
@@ -200,38 +178,34 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
     if (!S_ISDIR(inode->i_mode))
         return;
 
-    spin_lock(&inode->i_lock);
-    entry = fsnotify_find_mark_entry(dnotify_group, inode);
-    spin_unlock(&inode->i_lock);
-    if (!entry)
+    fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+    if (!fsn_mark)
         return;
-    dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+    dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
 
     mutex_lock(&dnotify_mark_mutex);
 
-    spin_lock(&entry->lock);
-    prev = &dnentry->dn;
+    spin_lock(&fsn_mark->lock);
+    prev = &dn_mark->dn;
     while ((dn = *prev) != NULL) {
         if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
             *prev = dn->dn_next;
             kmem_cache_free(dnotify_struct_cache, dn);
-            dnotify_recalc_inode_mask(entry);
+            dnotify_recalc_inode_mask(fsn_mark);
             break;
         }
         prev = &dn->dn_next;
     }
 
-    spin_unlock(&entry->lock);
+    spin_unlock(&fsn_mark->lock);
 
     /* nothing else could have found us thanks to the dnotify_mark_mutex */
-    if (dnentry->dn == NULL)
-        fsnotify_destroy_mark_by_entry(entry);
-
-    fsnotify_recalc_group_mask(dnotify_group);
+    if (dn_mark->dn == NULL)
+        fsnotify_destroy_mark(fsn_mark);
 
     mutex_unlock(&dnotify_mark_mutex);
 
-    fsnotify_put_mark(entry);
+    fsnotify_put_mark(fsn_mark);
 }
 /* this conversion is done only at watch creation */
@@ -259,16 +233,16 @@ static __u32 convert_arg(unsigned long arg)
 
 /*
  * If multiple processes watch the same inode with dnotify there is only one
- * dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct
+ * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
  * onto that mark. This function either attaches the new dnotify_struct onto
 * that list, or it |= the mask onto an existing dnotify_struct.
 */
-static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry,
+static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
                      fl_owner_t id, int fd, struct file *filp, __u32 mask)
 {
     struct dnotify_struct *odn;
 
-    odn = dnentry->dn;
+    odn = dn_mark->dn;
     while (odn != NULL) {
         /* adding more events to existing dnotify_struct? */
         if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
@@ -283,8 +257,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
     dn->dn_fd = fd;
     dn->dn_filp = filp;
     dn->dn_owner = id;
-    dn->dn_next = dnentry->dn;
-    dnentry->dn = dn;
+    dn->dn_next = dn_mark->dn;
+    dn_mark->dn = dn;
 
     return 0;
 }
@@ -296,8 +270,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
  */
 int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 {
-    struct dnotify_mark_entry *new_dnentry, *dnentry;
-    struct fsnotify_mark_entry *new_entry, *entry;
+    struct dnotify_mark *new_dn_mark, *dn_mark;
+    struct fsnotify_mark *new_fsn_mark, *fsn_mark;
     struct dnotify_struct *dn;
     struct inode *inode;
     fl_owner_t id = current->files;
@@ -306,7 +280,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
     __u32 mask;
 
     /* we use these to tell if we need to kfree */
-    new_entry = NULL;
+    new_fsn_mark = NULL;
     dn = NULL;
 
     if (!dir_notify_enable) {
@@ -336,8 +310,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
     }
 
     /* new fsnotify mark, we expect most fcntl calls to add a new mark */
-    new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL);
-    if (!new_dnentry) {
+    new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
+    if (!new_dn_mark) {
         error = -ENOMEM;
         goto out_err;
     }
@@ -345,29 +319,27 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
     /* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
     mask = convert_arg(arg);
 
-    /* set up the new_entry and new_dnentry */
-    new_entry = &new_dnentry->fsn_entry;
-    fsnotify_init_mark(new_entry, dnotify_free_mark);
-    new_entry->mask = mask;
-    new_dnentry->dn = NULL;
+    /* set up the new_fsn_mark and new_dn_mark */
+    new_fsn_mark = &new_dn_mark->fsn_mark;
+    fsnotify_init_mark(new_fsn_mark, dnotify_free_mark);
+    new_fsn_mark->mask = mask;
+    new_dn_mark->dn = NULL;
 
     /* this is needed to prevent the fcntl/close race described below */
     mutex_lock(&dnotify_mark_mutex);
 
-    /* add the new_entry or find an old one. */
-    spin_lock(&inode->i_lock);
-    entry = fsnotify_find_mark_entry(dnotify_group, inode);
-    spin_unlock(&inode->i_lock);
-    if (entry) {
-        dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
-        spin_lock(&entry->lock);
+    /* add the new_fsn_mark or find an old one. */
+    fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+    if (fsn_mark) {
+        dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
+        spin_lock(&fsn_mark->lock);
     } else {
-        fsnotify_add_mark(new_entry, dnotify_group, inode);
-        spin_lock(&new_entry->lock);
-        entry = new_entry;
-        dnentry = new_dnentry;
-        /* we used new_entry, so don't free it */
-        new_entry = NULL;
+        fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0);
+        spin_lock(&new_fsn_mark->lock);
+        fsn_mark = new_fsn_mark;
+        dn_mark = new_dn_mark;
+        /* we used new_fsn_mark, so don't free it */
+        new_fsn_mark = NULL;
     }
 
     rcu_read_lock();
@@ -376,17 +348,17 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 
     /* if (f != filp) means that we lost a race and another task/thread
      * actually closed the fd we are still playing with before we grabbed
-     * the dnotify_mark_mutex and entry->lock. Since closing the fd is the
-     * only time we clean up the mark entries we need to get our mark off
+     * the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the
+     * only time we clean up the marks we need to get our mark off
      * the list. */
     if (f != filp) {
         /* if we added ourselves, shoot ourselves, it's possible that
-         * the flush actually did shoot this entry. That's fine too
+         * the flush actually did shoot this fsn_mark. That's fine too
          * since multiple calls to destroy_mark is perfectly safe, if
-         * we found a dnentry already attached to the inode, just sod
+         * we found a dn_mark already attached to the inode, just sod
          * off silently as the flush at close time dealt with it.
          */
-        if (dnentry == new_dnentry)
+        if (dn_mark == new_dn_mark)
             destroy = 1;
         goto out;
     }
@@ -394,13 +366,13 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
     error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
     if (error) {
         /* if we added, we must shoot */
-        if (dnentry == new_dnentry)
+        if (dn_mark == new_dn_mark)
             destroy = 1;
         goto out;
     }
 
-    error = attach_dn(dn, dnentry, id, fd, filp, mask);
-    /* !error means that we attached the dn to the dnentry, so don't free it */
+    error = attach_dn(dn, dn_mark, id, fd, filp, mask);
+    /* !error means that we attached the dn to the dn_mark, so don't free it */
     if (!error)
         dn = NULL;
     /* -EEXIST means that we didn't add this new dn and used an old one.
@@ -408,20 +380,18 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
     else if (error == -EEXIST)
         error = 0;
 
-    dnotify_recalc_inode_mask(entry);
+    dnotify_recalc_inode_mask(fsn_mark);
 out:
-    spin_unlock(&entry->lock);
+    spin_unlock(&fsn_mark->lock);
 
     if (destroy)
-        fsnotify_destroy_mark_by_entry(entry);
-
-    fsnotify_recalc_group_mask(dnotify_group);
+        fsnotify_destroy_mark(fsn_mark);
 
     mutex_unlock(&dnotify_mark_mutex);
-    fsnotify_put_mark(entry);
+    fsnotify_put_mark(fsn_mark);
 out_err:
-    if (new_entry)
-        fsnotify_put_mark(new_entry);
+    if (new_fsn_mark)
+        fsnotify_put_mark(new_fsn_mark);
     if (dn)
         kmem_cache_free(dnotify_struct_cache, dn);
     return error;
@@ -430,10 +400,9 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 static int __init dnotify_init(void)
 {
     dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
-    dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
+    dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC);
 
-    dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM,
-                                          0, &dnotify_fsnotify_ops);
+    dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
     if (IS_ERR(dnotify_group))
         panic("unable to allocate fsnotify group for dnotify\n");
     return 0;
......
config FANOTIFY
    bool "Filesystem wide access notification"
    select FSNOTIFY
    select ANON_INODES
    default n
    ---help---
      Say Y here to enable fanotify support.  fanotify is a file access
      notification system which differs from inotify in that it sends
      an open file descriptor to the userspace listener along with
      the event.

      If unsure, say Y.

config FANOTIFY_ACCESS_PERMISSIONS
    bool "fanotify permissions checking"
    depends on FANOTIFY
    depends on SECURITY
    default n
    ---help---
      Say Y here if you want fanotify listeners to be able to make permission
      decisions concerning filesystem events.  This is used by some fanotify
      listeners which need to scan files before allowing the system access to
      use those files.  This is used by some anti-malware vendors and by some
      hierarchical storage management systems.

      If unsure, say N.

obj-$(CONFIG_FANOTIFY) += fanotify.o fanotify_user.o
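The help text above is the heart of the API: each event carries an open, readable descriptor for the object it describes. A minimal listener sketch against this ABI (raw syscalls, since libc wrappers came later; assumes the FAN_EVENT_OK/FAN_EVENT_NEXT helpers from the new <linux/fanotify.h>; 64-bit calling convention shown, the 32-bit entry point splits the mask as shown earlier):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fanotify.h>

int main(void)
{
    char buf[4096], path[64];
    ssize_t len;
    int fan = syscall(__NR_fanotify_init, FAN_CLOEXEC, O_RDONLY);

    if (fan < 0) {
        perror("fanotify_init");   /* needs CAP_SYS_ADMIN */
        return 1;
    }
    /* watch opens and closes of everything on the mount containing "/" */
    if (syscall(__NR_fanotify_mark, fan, FAN_MARK_ADD | FAN_MARK_MOUNT,
                (unsigned long long)(FAN_OPEN | FAN_CLOSE), AT_FDCWD, "/") < 0) {
        perror("fanotify_mark");
        return 1;
    }
    while ((len = read(fan, buf, sizeof(buf))) > 0) {
        struct fanotify_event_metadata *m = (void *)buf;

        while (FAN_EVENT_OK(m, len)) {
            /* m->fd is an open descriptor on the object itself */
            snprintf(path, sizeof(path), "/proc/self/fd/%d", m->fd);
            printf("pid=%d mask=%#llx fd=%d (%s)\n", (int)m->pid,
                   (unsigned long long)m->mask, m->fd, path);
            close(m->fd);
            m = FAN_EVENT_NEXT(m, len);
        }
    }
    return 0;
}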
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>
static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
{
pr_debug("%s: old=%p new=%p\n", __func__, old, new);
if (old->to_tell == new->to_tell &&
old->data_type == new->data_type &&
old->tgid == new->tgid) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_FILE):
if ((old->file->f_path.mnt == new->file->f_path.mnt) &&
(old->file->f_path.dentry == new->file->f_path.dentry))
return true;
case (FSNOTIFY_EVENT_NONE):
return true;
default:
BUG();
};
}
return false;
}
/* and the list better be locked by something too! */
static struct fsnotify_event *fanotify_merge(struct list_head *list,
struct fsnotify_event *event)
{
struct fsnotify_event_holder *test_holder;
struct fsnotify_event *test_event = NULL;
struct fsnotify_event *new_event;
pr_debug("%s: list=%p event=%p\n", __func__, list, event);
list_for_each_entry_reverse(test_holder, list, event_list) {
if (should_merge(test_holder->event, event)) {
test_event = test_holder->event;
break;
}
}
if (!test_event)
return NULL;
fsnotify_get_event(test_event);
/* if they are exactly the same we are done */
if (test_event->mask == event->mask)
return test_event;
/*
* if the refcnt == 2 this is the only queue
* for this event and so we can update the mask
* in place.
*/
if (atomic_read(&test_event->refcnt) == 2) {
test_event->mask |= event->mask;
return test_event;
}
new_event = fsnotify_clone_event(test_event);
/* done with test_event */
fsnotify_put_event(test_event);
/* couldn't allocate memory, merge was not possible */
if (unlikely(!new_event))
return ERR_PTR(-ENOMEM);
/* build new event and replace it on the list */
new_event->mask = (test_event->mask | event->mask);
fsnotify_replace_event(test_holder, new_event);
/* we hold a reference on new_event from clone_event */
return new_event;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static int fanotify_get_response_from_access(struct fsnotify_group *group,
struct fsnotify_event *event)
{
int ret;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
wait_event(group->fanotify_data.access_waitq, event->response);
/* userspace responded, convert to something usable */
spin_lock(&event->lock);
switch (event->response) {
case FAN_ALLOW:
ret = 0;
break;
case FAN_DENY:
default:
ret = -EPERM;
}
event->response = 0;
spin_unlock(&event->lock);
pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
group, event, ret);
return ret;
}
#endif
static int fanotify_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *fanotify_mark,
struct fsnotify_event *event)
{
int ret = 0;
struct fsnotify_event *notify_event = NULL;
BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
notify_event = fsnotify_add_notify_event(group, event, NULL, fanotify_merge);
if (IS_ERR(notify_event))
return PTR_ERR(notify_event);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
if (event->mask & FAN_ALL_PERM_EVENTS) {
/* if we merged we need to wait on the new event */
if (notify_event)
event = notify_event;
ret = fanotify_get_response_from_access(group, event);
}
#endif
if (notify_event)
fsnotify_put_event(notify_event);
return ret;
}
static bool fanotify_should_send_event(struct fsnotify_group *group,
struct inode *to_tell,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmnt_mark,
__u32 event_mask, void *data, int data_type)
{
__u32 marks_mask, marks_ignored_mask;
pr_debug("%s: group=%p to_tell=%p inode_mark=%p vfsmnt_mark=%p "
"mask=%x data=%p data_type=%d\n", __func__, group, to_tell,
inode_mark, vfsmnt_mark, event_mask, data, data_type);
pr_debug("%s: group=%p vfsmount_mark=%p inode_mark=%p mask=%x\n",
__func__, group, vfsmnt_mark, inode_mark, event_mask);
/* sorry, fanotify only gives a damn about files and dirs */
if (!S_ISREG(to_tell->i_mode) &&
!S_ISDIR(to_tell->i_mode))
return false;
/* if we don't have enough info to send an event to userspace say no */
if (data_type != FSNOTIFY_EVENT_FILE)
return false;
if (inode_mark && vfsmnt_mark) {
marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
} else if (inode_mark) {
/*
* if the event is for a child and this inode doesn't care about
* events on the child, don't send it!
*/
if ((event_mask & FS_EVENT_ON_CHILD) &&
!(inode_mark->mask & FS_EVENT_ON_CHILD))
return false;
marks_mask = inode_mark->mask;
marks_ignored_mask = inode_mark->ignored_mask;
} else if (vfsmnt_mark) {
marks_mask = vfsmnt_mark->mask;
marks_ignored_mask = vfsmnt_mark->ignored_mask;
} else {
BUG();
}
if (event_mask & marks_mask & ~marks_ignored_mask)
return true;
return false;
}
const struct fsnotify_ops fanotify_fsnotify_ops = {
.handle_event = fanotify_handle_event,
.should_send_event = fanotify_should_send_event,
.free_group_priv = NULL,
.free_event_priv = NULL,
.freeing_mark = NULL,
};
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
extern const struct fsnotify_ops fanotify_fsnotify_ops;
static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;
struct fanotify_response_event {
struct list_head list;
__s32 fd;
struct fsnotify_event *event;
};
/*
* Get an fsnotify notification event if one exists and is small
* enough to fit in "count". Return an error pointer if the count
* is not large enough.
*
* Called with the group->notification_mutex held.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
BUG_ON(!mutex_is_locked(&group->notification_mutex));
pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
if (fsnotify_notify_queue_is_empty(group))
return NULL;
if (FAN_EVENT_METADATA_LEN > count)
return ERR_PTR(-EINVAL);
/* held the notification_mutex the whole time, so this is the
* same event we peeked above */
return fsnotify_remove_notify_event(group);
}
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
int client_fd;
struct dentry *dentry;
struct vfsmount *mnt;
struct file *new_file;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
client_fd = get_unused_fd();
if (client_fd < 0)
return client_fd;
if (event->data_type != FSNOTIFY_EVENT_FILE) {
WARN_ON(1);
put_unused_fd(client_fd);
return -EINVAL;
}
/*
* we need a new file handle for the userspace program so it can read even if it was
* originally opened O_WRONLY.
*/
dentry = dget(event->file->f_path.dentry);
mnt = mntget(event->file->f_path.mnt);
/* it's possible this event was an overflow event. in that case dentry and mnt
 * are NULL; that's fine, just don't call dentry_open */
if (dentry && mnt)
new_file = dentry_open(dentry, mnt,
group->fanotify_data.f_flags | FMODE_NONOTIFY,
current_cred());
else
new_file = ERR_PTR(-EOVERFLOW);
if (IS_ERR(new_file)) {
/*
* we still send an event even if we can't open the file. this
* can happen when say tasks are gone and we try to open their
* /proc files or we try to open a WRONLY file like in sysfs
* we just send the errno to userspace since there isn't much
* else we can do.
*/
put_unused_fd(client_fd);
client_fd = PTR_ERR(new_file);
} else {
fd_install(client_fd, new_file);
}
return client_fd;
}
static ssize_t fill_event_metadata(struct fsnotify_group *group,
struct fanotify_event_metadata *metadata,
struct fsnotify_event *event)
{
pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
group, metadata, event);
metadata->event_len = FAN_EVENT_METADATA_LEN;
metadata->vers = FANOTIFY_METADATA_VERSION;
metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
metadata->pid = pid_vnr(event->tgid);
metadata->fd = create_fd(group, event);
return metadata->fd;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
__s32 fd)
{
struct fanotify_response_event *re, *return_re = NULL;
mutex_lock(&group->fanotify_data.access_mutex);
list_for_each_entry(re, &group->fanotify_data.access_list, list) {
if (re->fd != fd)
continue;
list_del_init(&re->list);
return_re = re;
break;
}
mutex_unlock(&group->fanotify_data.access_mutex);
pr_debug("%s: found return_re=%p\n", __func__, return_re);
return return_re;
}
static int process_access_response(struct fsnotify_group *group,
struct fanotify_response *response_struct)
{
struct fanotify_response_event *re;
__s32 fd = response_struct->fd;
__u32 response = response_struct->response;
pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
fd, response);
/*
* make sure the response is valid, if invalid we do nothing and either
* userspace can send a valid response or we will clean it up after the
* timeout
*/
switch (response) {
case FAN_ALLOW:
case FAN_DENY:
break;
default:
return -EINVAL;
}
if (fd < 0)
return -EINVAL;
re = dequeue_re(group, fd);
if (!re)
return -ENOENT;
re->event->response = response;
wake_up(&group->fanotify_data.access_waitq);
kmem_cache_free(fanotify_response_event_cache, re);
return 0;
}
static int prepare_for_access_response(struct fsnotify_group *group,
struct fsnotify_event *event,
__s32 fd)
{
struct fanotify_response_event *re;
if (!(event->mask & FAN_ALL_PERM_EVENTS))
return 0;
re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
if (!re)
return -ENOMEM;
re->event = event;
re->fd = fd;
mutex_lock(&group->fanotify_data.access_mutex);
list_add_tail(&re->list, &group->fanotify_data.access_list);
mutex_unlock(&group->fanotify_data.access_mutex);
return 0;
}
static void remove_access_response(struct fsnotify_group *group,
struct fsnotify_event *event,
__s32 fd)
{
struct fanotify_response_event *re;
if (!(event->mask & FAN_ALL_PERM_EVENTS))
return;
re = dequeue_re(group, fd);
if (!re)
return;
BUG_ON(re->event != event);
kmem_cache_free(fanotify_response_event_cache, re);
return;
}
#else
static int prepare_for_access_response(struct fsnotify_group *group,
struct fsnotify_event *event,
__s32 fd)
{
return 0;
}
static void remove_access_response(struct fsnotify_group *group,
struct fsnotify_event *event,
__s32 fd)
{
return;
}
#endif
static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
char __user *buf)
{
struct fanotify_event_metadata fanotify_event_metadata;
int fd, ret;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
fd = fill_event_metadata(group, &fanotify_event_metadata, event);
if (fd < 0)
return fd;
ret = prepare_for_access_response(group, event, fd);
if (ret)
goto out_close_fd;
ret = -EFAULT;
if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
goto out_kill_access_response;
return FAN_EVENT_METADATA_LEN;
out_kill_access_response:
remove_access_response(group, event, fd);
out_close_fd:
sys_close(fd);
return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
int ret = 0;
poll_wait(file, &group->notification_waitq, wait);
mutex_lock(&group->notification_mutex);
if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM;
mutex_unlock(&group->notification_mutex);
return ret;
}
static ssize_t fanotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct fsnotify_group *group;
struct fsnotify_event *kevent;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
group = file->private_data;
pr_debug("%s: group=%p\n", __func__, group);
while (1) {
prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
if (kevent) {
ret = PTR_ERR(kevent);
if (IS_ERR(kevent))
break;
ret = copy_event_to_user(group, kevent, buf);
fsnotify_put_event(kevent);
if (ret < 0)
break;
buf += ret;
count -= ret;
continue;
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
if (signal_pending(current))
break;
if (start != buf)
break;
schedule();
}
finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
}
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
struct fanotify_response response = { .fd = -1, .response = -1 };
struct fsnotify_group *group;
int ret;
group = file->private_data;
if (count > sizeof(response))
count = sizeof(response);
pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
if (copy_from_user(&response, buf, count))
return -EFAULT;
ret = process_access_response(group, &response);
if (ret < 0)
count = ret;
return count;
#else
return -EINVAL;
#endif
}
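fanotify_write() above is the reply channel for the permission events: the listener answers by writing one struct fanotify_response per decision back to the group fd. A hedged sketch (assumes a kernel built with CONFIG_FANOTIFY_ACCESS_PERMISSIONS; the respond() helper name is illustrative):

#include <unistd.h>
#include <linux/fanotify.h>

/* Answer one FAN_OPEN_PERM/FAN_ACCESS_PERM event; allow != 0 grants access.
 * Sketch only: a real caller should also handle short writes and EINTR. */
static int respond(int fanotify_fd, const struct fanotify_event_metadata *m,
                   int allow)
{
    struct fanotify_response r = {
        .fd = m->fd,
        .response = allow ? FAN_ALLOW : FAN_DENY,
    };

    if (write(fanotify_fd, &r, sizeof(r)) != sizeof(r))
        return -1;   /* rejected; see process_access_response() above */
    return 0;
}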
static int fanotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
pr_debug("%s: file=%p group=%p\n", __func__, file, group);
/* matches the fanotify_init->fsnotify_alloc_group */
fsnotify_put_group(group);
return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct fsnotify_group *group;
struct fsnotify_event_holder *holder;
void __user *p;
int ret = -ENOTTY;
size_t send_len = 0;
group = file->private_data;
p = (void __user *) arg;
switch (cmd) {
case FIONREAD:
mutex_lock(&group->notification_mutex);
list_for_each_entry(holder, &group->notification_list, event_list)
send_len += FAN_EVENT_METADATA_LEN;
mutex_unlock(&group->notification_mutex);
ret = put_user(send_len, (int __user *) p);
break;
}
return ret;
}
static const struct file_operations fanotify_fops = {
.poll = fanotify_poll,
.read = fanotify_read,
.write = fanotify_write,
.fasync = NULL,
.release = fanotify_release,
.unlocked_ioctl = fanotify_ioctl,
.compat_ioctl = fanotify_ioctl,
};
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
static int fanotify_find_path(int dfd, const char __user *filename,
struct path *path, unsigned int flags)
{
int ret;
pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
dfd, filename, flags);
if (filename == NULL) {
struct file *file;
int fput_needed;
ret = -EBADF;
file = fget_light(dfd, &fput_needed);
if (!file)
goto out;
ret = -ENOTDIR;
if ((flags & FAN_MARK_ONLYDIR) &&
!(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
fput_light(file, fput_needed);
goto out;
}
*path = file->f_path;
path_get(path);
fput_light(file, fput_needed);
} else {
unsigned int lookup_flags = 0;
if (!(flags & FAN_MARK_DONT_FOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
if (flags & FAN_MARK_ONLYDIR)
lookup_flags |= LOOKUP_DIRECTORY;
ret = user_path_at(dfd, filename, lookup_flags, path);
if (ret)
goto out;
}
/* you can only watch an inode if you have read permissions on it */
ret = inode_permission(path->dentry->d_inode, MAY_READ);
if (ret)
path_put(path);
out:
return ret;
}
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
__u32 mask,
unsigned int flags)
{
__u32 oldmask;
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
oldmask = fsn_mark->mask;
fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
} else {
oldmask = fsn_mark->ignored_mask;
fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
}
spin_unlock(&fsn_mark->lock);
if (!(oldmask & ~mask))
fsnotify_destroy_mark(fsn_mark);
return mask & oldmask;
}
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
struct vfsmount *mnt, __u32 mask,
unsigned int flags)
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
if (!fsn_mark)
return -ENOENT;
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
fsnotify_put_mark(fsn_mark);
if (removed & mnt->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
return 0;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
struct inode *inode, __u32 mask,
unsigned int flags)
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
return -ENOENT;
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
/* matches the fsnotify_find_inode_mark() */
fsnotify_put_mark(fsn_mark);
if (removed & inode->i_fsnotify_mask)
fsnotify_recalc_inode_mask(inode);
return 0;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
__u32 mask,
unsigned int flags)
{
__u32 oldmask;
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
oldmask = fsn_mark->mask;
fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
} else {
oldmask = fsn_mark->ignored_mask;
fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
}
spin_unlock(&fsn_mark->lock);
return mask & ~oldmask;
}
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
struct vfsmount *mnt, __u32 mask,
unsigned int flags)
{
struct fsnotify_mark *fsn_mark;
__u32 added;
fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
if (!fsn_mark) {
int ret;
fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
if (!fsn_mark)
return -ENOMEM;
fsnotify_init_mark(fsn_mark, fanotify_free_mark);
ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
if (ret) {
fanotify_free_mark(fsn_mark);
return ret;
}
}
added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
fsnotify_put_mark(fsn_mark);
if (added & ~mnt->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
return 0;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
struct inode *inode, __u32 mask,
unsigned int flags)
{
struct fsnotify_mark *fsn_mark;
__u32 added;
pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark) {
int ret;
fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
if (!fsn_mark)
return -ENOMEM;
fsnotify_init_mark(fsn_mark, fanotify_free_mark);
ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
if (ret) {
fanotify_free_mark(fsn_mark);
return ret;
}
}
added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
fsnotify_put_mark(fsn_mark);
if (added & ~inode->i_fsnotify_mask)
fsnotify_recalc_inode_mask(inode);
return 0;
}
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
struct fsnotify_group *group;
int f_flags, fd;
pr_debug("%s: flags=%d event_f_flags=%d\n",
__func__, flags, event_f_flags);
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (flags & ~FAN_ALL_INIT_FLAGS)
return -EINVAL;
f_flags = O_RDWR | FMODE_NONOTIFY;
if (flags & FAN_CLOEXEC)
f_flags |= O_CLOEXEC;
if (flags & FAN_NONBLOCK)
f_flags |= O_NONBLOCK;
/* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
if (IS_ERR(group))
return PTR_ERR(group);
group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
mutex_init(&group->fanotify_data.access_mutex);
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
if (fd < 0)
goto out_put_group;
return fd;
out_put_group:
fsnotify_put_group(group);
return fd;
}
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
__u64 mask, int dfd,
const char __user * pathname)
{
struct inode *inode = NULL;
struct vfsmount *mnt = NULL;
struct fsnotify_group *group;
struct file *filp;
struct path path;
int ret, fput_needed;
pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
__func__, fanotify_fd, flags, dfd, pathname, mask);
/* we only use the lower 32 bits as of right now. */
if (mask & ((__u64)0xffffffff << 32))
return -EINVAL;
if (flags & ~FAN_ALL_MARK_FLAGS)
return -EINVAL;
switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
case FAN_MARK_ADD:
case FAN_MARK_REMOVE:
case FAN_MARK_FLUSH:
break;
default:
return -EINVAL;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
return -EINVAL;
filp = fget_light(fanotify_fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an fanotify instance */
ret = -EINVAL;
if (unlikely(filp->f_op != &fanotify_fops))
goto fput_and_out;
ret = fanotify_find_path(dfd, pathname, &path, flags);
if (ret)
goto fput_and_out;
/* inode held in place by reference to path; group by fget on fd */
if (!(flags & FAN_MARK_MOUNT))
inode = path.dentry->d_inode;
else
mnt = path.mnt;
group = filp->private_data;
/* create/update an inode mark */
switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
case FAN_MARK_ADD:
if (flags & FAN_MARK_MOUNT)
ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
else
ret = fanotify_add_inode_mark(group, inode, mask, flags);
break;
case FAN_MARK_REMOVE:
if (flags & FAN_MARK_MOUNT)
ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
else
ret = fanotify_remove_inode_mark(group, inode, mask, flags);
break;
case FAN_MARK_FLUSH:
if (flags & FAN_MARK_MOUNT)
fsnotify_clear_vfsmount_marks_by_group(group);
else
fsnotify_clear_inode_marks_by_group(group);
break;
default:
ret = -EINVAL;
}
path_put(&path);
fput_and_out:
fput_light(filp, fput_needed);
return ret;
}
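The switch above admits exactly one of FAN_MARK_ADD, FAN_MARK_REMOVE, or FAN_MARK_FLUSH per call, optionally qualified by FAN_MARK_MOUNT. A few representative invocations, sketched as raw 64-bit syscalls against an already-created group fd (illustrative only):

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fanotify.h>

static void mark_examples(int fan)
{
    /* inode mark: opens of /tmp itself and of its immediate children */
    syscall(__NR_fanotify_mark, fan, FAN_MARK_ADD,
            (unsigned long long)(FAN_OPEN | FAN_EVENT_ON_CHILD),
            AT_FDCWD, "/tmp");

    /* vfsmount mark: the same event for every object on the mount of "/" */
    syscall(__NR_fanotify_mark, fan, FAN_MARK_ADD | FAN_MARK_MOUNT,
            (unsigned long long)FAN_OPEN, AT_FDCWD, "/");

    /* drop every inode mark this group owns in one call */
    syscall(__NR_fanotify_mark, fan, FAN_MARK_FLUSH, 0ULL, AT_FDCWD, "/");
}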
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
long dfd, long pathname)
{
return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
mask, (int) dfd,
(const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
/*
* fanotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
static int __init fanotify_user_setup(void)
{
fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
SLAB_PANIC);
return 0;
}
device_initcall(fanotify_user_setup);
@@ -21,6 +21,7 @@
 #include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/mount.h>
 #include <linux/srcu.h>
 
 #include <linux/fsnotify_backend.h>
@@ -35,6 +36,11 @@ void __fsnotify_inode_delete(struct inode *inode)
 }
 EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
 
+void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{
+    fsnotify_clear_marks_by_mount(mnt);
+}
+
 /*
  * Given an inode, first check if we care what happens to our children. Inotify
  * and dnotify both tell their parents about events. If we care about any event
@@ -78,13 +84,16 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
 }
 
 /* Notify this dentry's parent about a child's events. */
-void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
 {
     struct dentry *parent;
     struct inode *p_inode;
     bool send = false;
     bool should_update_children = false;
 
+    if (!dentry)
+        dentry = file->f_path.dentry;
+
     if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
         return;
@@ -115,8 +124,12 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
          * specifies these are events which came from a child. */
         mask |= FS_EVENT_ON_CHILD;
 
-        fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
-                 dentry->d_name.name, 0);
+        if (file)
+            fsnotify(p_inode, mask, file, FSNOTIFY_EVENT_FILE,
+                     dentry->d_name.name, 0);
+        else
+            fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
+                     dentry->d_name.name, 0);
         dput(parent);
     }
...@@ -127,63 +140,181 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask) ...@@ -127,63 +140,181 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
} }
EXPORT_SYMBOL_GPL(__fsnotify_parent); EXPORT_SYMBOL_GPL(__fsnotify_parent);
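From user space, __fsnotify_parent() is what makes a directory watch report events on the directory's children, child name included. A minimal inotify demo illustrating that behaviour (a sketch: error handling is minimal and "/tmp" is only an example path):

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/inotify.h>

        int main(void)
        {
                char buf[4096] __attribute__((aligned(8)));
                ssize_t len, i;
                int fd = inotify_init();

                if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_MODIFY) < 0)
                        return 1;

                len = read(fd, buf, sizeof(buf));       /* blocks until an event arrives */
                for (i = 0; i < len; ) {
                        struct inotify_event *ev = (struct inotify_event *)&buf[i];

                        /* ev->name is the child's name, delivered via __fsnotify_parent() */
                        printf("mask=0x%x name=%s\n", ev->mask, ev->len ? ev->name : "");
                        i += sizeof(*ev) + ev->len;
                }
                return 0;
        }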
static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
__u32 mask, void *data,
int data_is, u32 cookie,
const unsigned char *file_name,
struct fsnotify_event **event)
{
struct fsnotify_group *group = inode_mark->group;
__u32 inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
__u32 vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
pr_debug("%s: group=%p to_tell=%p mnt=%p mark=%p mask=%x data=%p"
" data_is=%d cookie=%d event=%p\n", __func__, group, to_tell,
mnt, inode_mark, mask, data, data_is, cookie, *event);
/* clear ignored on inode modification */
if (mask & FS_MODIFY) {
if (inode_mark &&
!(inode_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
inode_mark->ignored_mask = 0;
if (vfsmount_mark &&
!(vfsmount_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
vfsmount_mark->ignored_mask = 0;
}
/* does the inode mark tell us to do something? */
if (inode_mark) {
inode_test_mask &= inode_mark->mask;
inode_test_mask &= ~inode_mark->ignored_mask;
}
/* does the vfsmount_mark tell us to do something? */
if (vfsmount_mark) {
vfsmount_test_mask &= vfsmount_mark->mask;
vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
if (inode_mark)
vfsmount_test_mask &= ~inode_mark->ignored_mask;
}
if (!inode_test_mask && !vfsmount_test_mask)
return 0;
if (group->ops->should_send_event(group, to_tell, inode_mark,
vfsmount_mark, mask, data,
data_is) == false)
return 0;
if (!*event) {
*event = fsnotify_create_event(to_tell, mask, data,
data_is, file_name,
cookie, GFP_KERNEL);
if (!*event)
return -ENOMEM;
}
return group->ops->handle_event(group, inode_mark, vfsmount_mark, *event);
}
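Stripped of the SRCU and group plumbing, the decision send_to_group() makes is pure mask arithmetic: drop FS_EVENT_ON_CHILD, intersect with what the mark wants, subtract what it ignores. A self-contained sketch (the bit values below are stand-ins, not the kernel's constants):

        #include <stdio.h>

        #define FS_MODIFY               0x00000002      /* stand-in value */
        #define FS_EVENT_ON_CHILD       0x08000000      /* stand-in value */

        struct mark { unsigned int mask, ignored_mask; };

        static int wants_event(const struct mark *m, unsigned int event_mask)
        {
                unsigned int test = event_mask & ~FS_EVENT_ON_CHILD;

                test &= m->mask;                /* does the mark care at all? */
                test &= ~m->ignored_mask;       /* ...and is the event not ignored? */
                return test != 0;
        }

        int main(void)
        {
                struct mark m = { .mask = FS_MODIFY, .ignored_mask = 0 };

                printf("%d\n", wants_event(&m, FS_MODIFY | FS_EVENT_ON_CHILD)); /* 1 */
                m.ignored_mask = FS_MODIFY;
                printf("%d\n", wants_event(&m, FS_MODIFY));                     /* 0 */
                return 0;
        }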
/* /*
* This is the main call to fsnotify. The VFS calls into hook specific functions * This is the main call to fsnotify. The VFS calls into hook specific functions
* in linux/fsnotify.h. Those functions then in turn call here. Here will call * in linux/fsnotify.h. Those functions then in turn call here. Here will call
* out to all of the registered fsnotify_group. Those groups can then use the * out to all of the registered fsnotify_group. Those groups can then use the
* notification event in whatever means they feel necessary. * notification event in whatever means they feel necessary.
*/ */
void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const char *file_name, u32 cookie) int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const unsigned char *file_name, u32 cookie)
{ {
struct fsnotify_group *group; struct hlist_node *inode_node, *vfsmount_node;
struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
struct fsnotify_group *inode_group, *vfsmount_group;
struct fsnotify_event *event = NULL; struct fsnotify_event *event = NULL;
int idx; struct vfsmount *mnt;
int idx, ret = 0;
bool used_inode = false, used_vfsmount = false;
/* global tests shouldn't care about events on child only the specific event */ /* global tests shouldn't care about events on child only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
if (list_empty(&fsnotify_groups)) if (data_is == FSNOTIFY_EVENT_FILE)
return; mnt = ((struct file *)data)->f_path.mnt;
else
mnt = NULL;
if (!(test_mask & fsnotify_mask))
return;
if (!(test_mask & to_tell->i_fsnotify_mask))
return;
/* /*
* SRCU!! the groups list is very very much read only and the path is * if this is a modify event we may need to clear the ignored masks
* very hot. The VAST majority of events are not going to need to do * otherwise return if neither the inode nor the vfsmount care about
* anything other than walk the list so it's crazy to pre-allocate. * this type of event.
*/ */
idx = srcu_read_lock(&fsnotify_grp_srcu); if (!(mask & FS_MODIFY) &&
list_for_each_entry_rcu(group, &fsnotify_groups, group_list) { !(test_mask & to_tell->i_fsnotify_mask) &&
if (test_mask & group->mask) { !(mnt && test_mask & mnt->mnt_fsnotify_mask))
if (!group->ops->should_send_event(group, to_tell, mask)) return 0;
continue;
if (!event) { idx = srcu_read_lock(&fsnotify_mark_srcu);
event = fsnotify_create_event(to_tell, mask, data,
data_is, file_name, cookie, if ((mask & FS_MODIFY) ||
GFP_KERNEL); (test_mask & to_tell->i_fsnotify_mask))
/* shit, we OOM'd and now we can't tell, maybe inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
* someday someone else will want to do something &fsnotify_mark_srcu);
* here */ else
if (!event) inode_node = NULL;
break;
} if (mnt) {
group->ops->handle_event(group, event); if ((mask & FS_MODIFY) ||
(test_mask & mnt->mnt_fsnotify_mask))
vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
&fsnotify_mark_srcu);
else
vfsmount_node = NULL;
} else {
mnt = NULL;
vfsmount_node = NULL;
}
while (inode_node || vfsmount_node) {
if (inode_node) {
inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
struct fsnotify_mark, i.i_list);
inode_group = inode_mark->group;
} else
inode_group = (void *)-1;
if (vfsmount_node) {
vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
struct fsnotify_mark, m.m_list);
vfsmount_group = vfsmount_mark->group;
} else
vfsmount_group = (void *)-1;
if (inode_group < vfsmount_group) {
/* handle inode */
send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
data_is, cookie, file_name, &event);
used_inode = true;
} else if (vfsmount_group < inode_group) {
send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
data_is, cookie, file_name, &event);
used_vfsmount = true;
} else {
send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
mask, data, data_is, cookie, file_name,
&event);
used_vfsmount = true;
used_inode = true;
} }
if (used_inode)
inode_node = srcu_dereference(inode_node->next,
&fsnotify_mark_srcu);
if (used_vfsmount)
vfsmount_node = srcu_dereference(vfsmount_node->next,
&fsnotify_mark_srcu);
} }
srcu_read_unlock(&fsnotify_grp_srcu, idx);
srcu_read_unlock(&fsnotify_mark_srcu, idx);
/* /*
* fsnotify_create_event() took a reference so the event can't be cleaned * fsnotify_create_event() took a reference so the event can't be cleaned
* up while we are still trying to add it to lists, drop that one. * up while we are still trying to add it to lists, drop that one.
*/ */
if (event) if (event)
fsnotify_put_event(event); fsnotify_put_event(event);
return ret;
} }
EXPORT_SYMBOL_GPL(fsnotify); EXPORT_SYMBOL_GPL(fsnotify);
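The while loop above is a textbook merge of two sorted sequences: both mark lists are kept ordered by group address, an exhausted list is treated as the maximal sentinel (void *)-1, and when both heads belong to the same group that group receives its inode mark and vfsmount mark in a single call. A simplified, runnable model of the walk (hypothetical types, plain arrays standing in for RCU-protected hlists, ascending order chosen for the sketch):

        #include <stdio.h>

        struct group { int id; };
        struct mark  { struct group *group; };

        static void walk(struct mark **imarks, int ni, struct mark **vmarks, int nv)
        {
                int i = 0, v = 0;

                while (i < ni || v < nv) {
                        /* an exhausted list compares as "largest possible group" */
                        struct group *ig = i < ni ? imarks[i]->group : (struct group *)-1;
                        struct group *vg = v < nv ? vmarks[v]->group : (struct group *)-1;

                        if (ig < vg) {
                                printf("group %d: inode mark only\n", ig->id);
                                i++;
                        } else if (vg < ig) {
                                printf("group %d: vfsmount mark only\n", vg->id);
                                v++;
                        } else {
                                printf("group %d: both marks at once\n", ig->id);
                                i++;
                                v++;
                        }
                }
        }

        int main(void)
        {
                struct group g1 = { 1 }, g2 = { 2 };    /* assume &g1 < &g2 for the demo */
                struct mark a = { &g1 }, b = { &g2 }, c = { &g2 };
                struct mark *imarks[] = { &a, &b };
                struct mark *vmarks[] = { &c };

                walk(imarks, 2, vmarks, 1);
                return 0;
        }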
static __init int fsnotify_init(void) static __init int fsnotify_init(void)
{ {
return init_srcu_struct(&fsnotify_grp_srcu); int ret;
BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23);
ret = init_srcu_struct(&fsnotify_mark_srcu);
if (ret)
panic("initializing fsnotify_mark_srcu");
return 0;
} }
subsys_initcall(fsnotify_init); core_initcall(fsnotify_init);
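The hweight32() check is a boot-time assertion that the number of defined FS_* event bits still matches expectations, forcing anyone who adds a flag to revisit this code. hweight32() is simply a 32-bit population count; a user-space analogue using a GCC builtin:

        #include <assert.h>

        int main(void)
        {
                /* 0x7 has three bits set, as hweight32(0x7) would report */
                assert(__builtin_popcount(0x7u) == 3);
                return 0;
        }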
...@@ -6,21 +6,34 @@ ...@@ -6,21 +6,34 @@
#include <linux/srcu.h> #include <linux/srcu.h>
#include <linux/types.h> #include <linux/types.h>
/* protects reads of fsnotify_groups */
extern struct srcu_struct fsnotify_grp_srcu;
/* all groups which receive fsnotify events */
extern struct list_head fsnotify_groups;
/* all bitwise OR of all event types (FS_*) for all fsnotify_groups */
extern __u32 fsnotify_mask;
/* destroy all events sitting in this groups notification queue */ /* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group); extern void fsnotify_flush_notify(struct fsnotify_group *group);
/* protects reads of inode and vfsmount marks list */
extern struct srcu_struct fsnotify_mark_srcu;
extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
__u32 mask);
/* add a mark to an inode */
extern int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
int allow_dups);
/* add a mark to a vfsmount */
extern int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct vfsmount *mnt,
int allow_dups);
/* final kfree of a group */ /* final kfree of a group */
extern void fsnotify_final_destroy_group(struct fsnotify_group *group); extern void fsnotify_final_destroy_group(struct fsnotify_group *group);
/* vfsmount specific destruction of a mark */
extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark);
/* inode specific destruction of a mark */
extern void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark);
/* run the list of all marks associated with inode and flag them to be freed */ /* run the list of all marks associated with inode and flag them to be freed */
extern void fsnotify_clear_marks_by_inode(struct inode *inode); extern void fsnotify_clear_marks_by_inode(struct inode *inode);
/* run the list of all marks associated with vfsmount and flag them to be freed */
extern void fsnotify_clear_marks_by_mount(struct vfsmount *mnt);
/* /*
* update the dentry->d_flags of all of inode's children to indicate if inode cares * update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children. * about events that happen to its children.
......
...@@ -28,64 +28,6 @@ ...@@ -28,64 +28,6 @@
#include <asm/atomic.h> #include <asm/atomic.h>
/* protects writes to fsnotify_groups and fsnotify_mask */
static DEFINE_MUTEX(fsnotify_grp_mutex);
/* protects reads while running the fsnotify_groups list */
struct srcu_struct fsnotify_grp_srcu;
/* all groups registered to receive filesystem notifications */
LIST_HEAD(fsnotify_groups);
/* bitwise OR of all events (FS_*) interesting to some group on this system */
__u32 fsnotify_mask;
/*
* When a new group registers or changes it's set of interesting events
* this function updates the fsnotify_mask to contain all interesting events
*/
void fsnotify_recalc_global_mask(void)
{
struct fsnotify_group *group;
__u32 mask = 0;
int idx;
idx = srcu_read_lock(&fsnotify_grp_srcu);
list_for_each_entry_rcu(group, &fsnotify_groups, group_list)
mask |= group->mask;
srcu_read_unlock(&fsnotify_grp_srcu, idx);
fsnotify_mask = mask;
}
/*
* Update the group->mask by running all of the marks associated with this
* group and finding the bitwise | of all of the mark->mask. If we change
* the group->mask we need to update the global mask of events interesting
* to the system.
*/
void fsnotify_recalc_group_mask(struct fsnotify_group *group)
{
__u32 mask = 0;
__u32 old_mask = group->mask;
struct fsnotify_mark_entry *entry;
spin_lock(&group->mark_lock);
list_for_each_entry(entry, &group->mark_entries, g_list)
mask |= entry->mask;
spin_unlock(&group->mark_lock);
group->mask = mask;
if (old_mask != mask)
fsnotify_recalc_global_mask();
}
/*
* Take a reference to a group so things found under the fsnotify_grp_mutex
* can't get freed under us
*/
static void fsnotify_get_group(struct fsnotify_group *group)
{
atomic_inc(&group->refcnt);
}
/* /*
* Final freeing of a group * Final freeing of a group
*/ */
...@@ -110,145 +52,53 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group) ...@@ -110,145 +52,53 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)
*/ */
static void fsnotify_destroy_group(struct fsnotify_group *group) static void fsnotify_destroy_group(struct fsnotify_group *group)
{ {
/* clear all inode mark entries for this group */ /* clear all inode marks for this group */
fsnotify_clear_marks_by_group(group); fsnotify_clear_marks_by_group(group);
synchronize_srcu(&fsnotify_mark_srcu);
/* past the point of no return, matches the initial value of 1 */ /* past the point of no return, matches the initial value of 1 */
if (atomic_dec_and_test(&group->num_marks)) if (atomic_dec_and_test(&group->num_marks))
fsnotify_final_destroy_group(group); fsnotify_final_destroy_group(group);
} }
/*
* Remove this group from the global list of groups that will get events
* this can be done even if there are still references and things still using
* this group. This just stops the group from getting new events.
*/
static void __fsnotify_evict_group(struct fsnotify_group *group)
{
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
if (group->on_group_list)
list_del_rcu(&group->group_list);
group->on_group_list = 0;
}
/*
* Called when a group is no longer interested in getting events. This can be
* used if a group is misbehaving or if for some reason a group should no longer
* get any filesystem events.
*/
void fsnotify_evict_group(struct fsnotify_group *group)
{
mutex_lock(&fsnotify_grp_mutex);
__fsnotify_evict_group(group);
mutex_unlock(&fsnotify_grp_mutex);
}
/* /*
* Drop a reference to a group. Free it if it's through. * Drop a reference to a group. Free it if it's through.
*/ */
void fsnotify_put_group(struct fsnotify_group *group) void fsnotify_put_group(struct fsnotify_group *group)
{ {
if (!atomic_dec_and_mutex_lock(&group->refcnt, &fsnotify_grp_mutex)) if (atomic_dec_and_test(&group->refcnt))
return; fsnotify_destroy_group(group);
/*
* OK, now we know that there's no other users *and* we hold mutex,
* so no new references will appear
*/
__fsnotify_evict_group(group);
/*
* now it's off the list, so the only thing we might care about is
* srcu access....
*/
mutex_unlock(&fsnotify_grp_mutex);
synchronize_srcu(&fsnotify_grp_srcu);
/* and now it is really dead. _Nothing_ could be seeing it */
fsnotify_recalc_global_mask();
fsnotify_destroy_group(group);
}
/*
* Simply run the fsnotify_groups list and find a group which matches
* the given parameters. If a group is found we take a reference to that
* group.
*/
static struct fsnotify_group *fsnotify_find_group(unsigned int group_num, __u32 mask,
const struct fsnotify_ops *ops)
{
struct fsnotify_group *group_iter;
struct fsnotify_group *group = NULL;
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
list_for_each_entry_rcu(group_iter, &fsnotify_groups, group_list) {
if (group_iter->group_num == group_num) {
if ((group_iter->mask == mask) &&
(group_iter->ops == ops)) {
fsnotify_get_group(group_iter);
group = group_iter;
} else
group = ERR_PTR(-EEXIST);
}
}
return group;
} }
/* /*
* Either finds an existing group which matches the group_num, mask, and ops or * Create a new fsnotify_group and hold a reference for the group returned.
* creates a new group and adds it to the global group list. In either case we
* take a reference for the group returned.
*/ */
struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask, struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
const struct fsnotify_ops *ops)
{ {
struct fsnotify_group *group, *tgroup; struct fsnotify_group *group;
/* very low use, simpler locking if we just always alloc */ group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
group = kmalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
if (!group) if (!group)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
/* set to 0 when there a no external references to this group */
atomic_set(&group->refcnt, 1); atomic_set(&group->refcnt, 1);
/*
group->on_group_list = 0; * hits 0 when there are no external references AND no marks for
group->group_num = group_num; * this group
group->mask = mask; */
atomic_set(&group->num_marks, 1);
mutex_init(&group->notification_mutex); mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list); INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq); init_waitqueue_head(&group->notification_waitq);
group->q_len = 0;
group->max_events = UINT_MAX; group->max_events = UINT_MAX;
spin_lock_init(&group->mark_lock); spin_lock_init(&group->mark_lock);
atomic_set(&group->num_marks, 0); INIT_LIST_HEAD(&group->marks_list);
INIT_LIST_HEAD(&group->mark_entries);
group->ops = ops; group->ops = ops;
mutex_lock(&fsnotify_grp_mutex);
tgroup = fsnotify_find_group(group_num, mask, ops);
if (tgroup) {
/* group already exists */
mutex_unlock(&fsnotify_grp_mutex);
/* destroy the new one we made */
fsnotify_put_group(group);
return tgroup;
}
/* group not found, add a new one */
list_add_rcu(&group->group_list, &fsnotify_groups);
group->on_group_list = 1;
/* being on the fsnotify_groups list holds one num_marks */
atomic_inc(&group->num_marks);
mutex_unlock(&fsnotify_grp_mutex);
if (mask)
fsnotify_recalc_global_mask();
return group; return group;
} }
...@@ -16,72 +16,6 @@ ...@@ -16,72 +16,6 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/ */
/*
* fsnotify inode mark locking/lifetime/and refcnting
*
* REFCNT:
* The mark->refcnt tells how many "things" in the kernel currently are
* referencing this object. The object typically will live inside the kernel
* with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
* which can find this object holding the appropriate locks, can take a reference
* and the object itself is guaranteed to survive until the reference is dropped.
*
* LOCKING:
* There are 3 spinlocks involved with fsnotify inode marks and they MUST
* be taken in order as follows:
*
* entry->lock
* group->mark_lock
* inode->i_lock
*
* entry->lock protects 2 things, entry->group and entry->inode. You must hold
* that lock to dereference either of these things (they could be NULL even with
* the lock)
*
* group->mark_lock protects the mark_entries list anchored inside a given group
* and each entry is hooked via the g_list. It also sorta protects the
* free_g_list, which when used is anchored by a private list on the stack of the
* task which held the group->mark_lock.
*
* inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
* given inode and each entry is hooked via the i_list. (and sorta the
* free_i_list)
*
*
* LIFETIME:
* Inode marks survive between when they are added to an inode and when their
* refcnt==0.
*
* The inode mark can be cleared for a number of different reasons including:
* - The inode is unlinked for the last time. (fsnotify_inode_remove)
* - The inode is being evicted from cache. (fsnotify_inode_delete)
* - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
* - Something explicitly requests that it be removed. (fsnotify_destroy_mark_by_entry)
* - The fsnotify_group associated with the mark is going away and all such marks
* need to be cleaned up. (fsnotify_clear_marks_by_group)
*
* Worst case we are given an inode and need to clean up all the marks on that
* inode. We take i_lock and walk the i_fsnotify_mark_entries safely. For each
* mark on the list we take a reference (so the mark can't disappear under us).
* We remove that mark from the inode's list of marks and we add this mark to a
* private list anchored on the stack using i_free_list; At this point we no
* longer fear anything finding the mark using the inode's list of marks.
*
* We can safely and locklessly run the private list on the stack of everything
* we just unattached from the original inode. For each mark on the private list
* we grab the mark->lock and can thus dereference mark->group and mark->inode. If
* we see the group and inode are not NULL we take those locks. Now holding all
* 3 locks we can completely remove the mark from other tasks finding it in the
* future. Remember, 10 things might already be referencing this mark, but they
* better be holding a ref. We drop the reference we took before we unhooked it
* from the inode. When the ref hits 0 we can free the mark.
*
* Very similarly for freeing by group, except we use free_g_list.
*
* This has the very interesting property of being able to run concurrently with
* any (or all) other directions.
*/
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -95,30 +29,19 @@ ...@@ -95,30 +29,19 @@
#include <linux/fsnotify_backend.h> #include <linux/fsnotify_backend.h>
#include "fsnotify.h" #include "fsnotify.h"
void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
{
atomic_inc(&entry->refcnt);
}
void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
{
if (atomic_dec_and_test(&entry->refcnt))
entry->free_mark(entry);
}
/* /*
* Recalculate the mask of events relevant to a given inode locked. * Recalculate the mask of events relevant to a given inode locked.
*/ */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode) static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{ {
struct fsnotify_mark_entry *entry; struct fsnotify_mark *mark;
struct hlist_node *pos; struct hlist_node *pos;
__u32 new_mask = 0; __u32 new_mask = 0;
assert_spin_locked(&inode->i_lock); assert_spin_locked(&inode->i_lock);
hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
new_mask |= entry->mask; new_mask |= mark->mask;
inode->i_fsnotify_mask = new_mask; inode->i_fsnotify_mask = new_mask;
} }
...@@ -135,107 +58,26 @@ void fsnotify_recalc_inode_mask(struct inode *inode) ...@@ -135,107 +58,26 @@ void fsnotify_recalc_inode_mask(struct inode *inode)
__fsnotify_update_child_dentry_flags(inode); __fsnotify_update_child_dentry_flags(inode);
} }
/* void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
* Any time a mark is getting freed we end up here.
* The caller had better be holding a reference to this mark so we don't actually
* do the final put under the entry->lock
*/
void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
{ {
struct fsnotify_group *group; struct inode *inode = mark->i.inode;
struct inode *inode;
spin_lock(&entry->lock); assert_spin_locked(&mark->lock);
assert_spin_locked(&mark->group->mark_lock);
group = entry->group;
inode = entry->inode;
BUG_ON(group && !inode);
BUG_ON(!group && inode);
/* if !group something else already marked this to die */
if (!group) {
spin_unlock(&entry->lock);
return;
}
/* 1 from caller and 1 for being on i_list/g_list */
BUG_ON(atomic_read(&entry->refcnt) < 2);
spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
hlist_del_init(&entry->i_list); hlist_del_init_rcu(&mark->i.i_list);
entry->inode = NULL; mark->i.inode = NULL;
list_del_init(&entry->g_list);
entry->group = NULL;
fsnotify_put_mark(entry); /* for i_list and g_list */
/* /*
* this mark is now off the inode->i_fsnotify_mark_entries list and we * this mark is now off the inode->i_fsnotify_marks list and we
* hold the inode->i_lock, so this is the perfect time to update the * hold the inode->i_lock, so this is the perfect time to update the
* inode->i_fsnotify_mask * inode->i_fsnotify_mask
*/ */
fsnotify_recalc_inode_mask_locked(inode); fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
spin_unlock(&group->mark_lock);
spin_unlock(&entry->lock);
/*
* Some groups like to know that marks are being freed. This is a
* callback to the group function to let it know that this entry
* is being freed.
*/
if (group->ops->freeing_mark)
group->ops->freeing_mark(entry, group);
/*
* __fsnotify_update_child_dentry_flags(inode);
*
* I really want to call that, but we can't, we have no idea if the inode
* still exists the second we drop the entry->lock.
*
* The next time an event arrives at this inode from one of its children
* __fsnotify_parent will see that the inode doesn't care about its
* children and will update all of these flags then. So really this
* is just a lazy update (and could be a perf win...)
*/
iput(inode);
/*
* it's possible that this group tried to destroy itself, but this
* this mark was simultaneously being freed by inode. If that's the
* case, we finish freeing the group here.
*/
if (unlikely(atomic_dec_and_test(&group->num_marks)))
fsnotify_final_destroy_group(group);
}
/*
* Given a group, destroy all of the marks associated with that group.
*/
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
struct fsnotify_mark_entry *lentry, *entry;
LIST_HEAD(free_list);
spin_lock(&group->mark_lock);
list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
list_add(&entry->free_g_list, &free_list);
list_del_init(&entry->g_list);
fsnotify_get_mark(entry);
}
spin_unlock(&group->mark_lock);
list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
}
} }
/* /*
...@@ -243,112 +85,145 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group) ...@@ -243,112 +85,145 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
*/ */
void fsnotify_clear_marks_by_inode(struct inode *inode) void fsnotify_clear_marks_by_inode(struct inode *inode)
{ {
struct fsnotify_mark_entry *entry, *lentry; struct fsnotify_mark *mark, *lmark;
struct hlist_node *pos, *n; struct hlist_node *pos, *n;
LIST_HEAD(free_list); LIST_HEAD(free_list);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) { hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
list_add(&entry->free_i_list, &free_list); list_add(&mark->i.free_i_list, &free_list);
hlist_del_init(&entry->i_list); hlist_del_init_rcu(&mark->i.i_list);
fsnotify_get_mark(entry); fsnotify_get_mark(mark);
} }
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) { list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
fsnotify_destroy_mark_by_entry(entry); fsnotify_destroy_mark(mark);
fsnotify_put_mark(entry); fsnotify_put_mark(mark);
} }
} }
/*
* Given a group clear all of the inode marks associated with that group.
*/
void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
}
/* /*
* given a group and inode, find the mark associated with that combination. * given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL * if found take a reference to that mark and return it, else return NULL
*/ */
struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
struct inode *inode) struct inode *inode)
{ {
struct fsnotify_mark_entry *entry; struct fsnotify_mark *mark;
struct hlist_node *pos; struct hlist_node *pos;
assert_spin_locked(&inode->i_lock); assert_spin_locked(&inode->i_lock);
hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) { hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
if (entry->group == group) { if (mark->group == group) {
fsnotify_get_mark(entry); fsnotify_get_mark(mark);
return entry; return mark;
} }
} }
return NULL; return NULL;
} }
/* /*
* Nothing fancy, just initialize lists and locks and counters. * given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/ */
void fsnotify_init_mark(struct fsnotify_mark_entry *entry, struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
void (*free_mark)(struct fsnotify_mark_entry *entry)) struct inode *inode)
{
struct fsnotify_mark *mark;
spin_lock(&inode->i_lock);
mark = fsnotify_find_inode_mark_locked(group, inode);
spin_unlock(&inode->i_lock);
return mark;
}
/*
* If we are setting a mark mask on an inode mark we should pin the inode
* in memory.
*/
void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
__u32 mask)
{ {
spin_lock_init(&entry->lock); struct inode *inode;
atomic_set(&entry->refcnt, 1);
INIT_HLIST_NODE(&entry->i_list); assert_spin_locked(&mark->lock);
entry->group = NULL;
entry->mask = 0; if (mask &&
entry->inode = NULL; mark->i.inode &&
entry->free_mark = free_mark; !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
inode = igrab(mark->i.inode);
/*
* we shouldn't be able to get here if the inode wasn't
* already safely held in memory. But BUG() in case it
* ever is wrong.
*/
BUG_ON(!inode);
}
} }
/* /*
* Attach an initialized mark entry to a given group and inode. * Attach an initialized mark to a given inode.
* These marks may be used for the fsnotify backend to determine which * These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group and for which inodes. * event types should be delivered to which group and for which inodes. These
* marks are ordered according to the group's location in memory.
*/ */
int fsnotify_add_mark(struct fsnotify_mark_entry *entry, int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode) struct fsnotify_group *group, struct inode *inode,
int allow_dups)
{ {
struct fsnotify_mark_entry *lentry; struct fsnotify_mark *lmark;
struct hlist_node *node, *last = NULL;
int ret = 0; int ret = 0;
inode = igrab(inode); mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
if (unlikely(!inode))
return -EINVAL; assert_spin_locked(&mark->lock);
assert_spin_locked(&group->mark_lock);
/*
* LOCKING ORDER!!!!
* entry->lock
* group->mark_lock
* inode->i_lock
*/
spin_lock(&entry->lock);
spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
lentry = fsnotify_find_mark_entry(group, inode); mark->i.inode = inode;
if (!lentry) {
entry->group = group;
entry->inode = inode;
hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries); /* is mark the first mark? */
list_add(&entry->g_list, &group->mark_entries); if (hlist_empty(&inode->i_fsnotify_marks)) {
hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
goto out;
}
fsnotify_get_mark(entry); /* for i_list and g_list */ /* should mark be in the middle of the current list? */
hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
last = node;
if ((lmark->group == group) && !allow_dups) {
ret = -EEXIST;
goto out;
}
atomic_inc(&group->num_marks); if (mark->group < lmark->group)
continue;
fsnotify_recalc_inode_mask_locked(inode); hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
goto out;
} }
BUG_ON(last == NULL);
/* mark should be the last entry. last is the current last entry */
hlist_add_after_rcu(last, &mark->i.i_list);
out:
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
spin_unlock(&group->mark_lock);
spin_unlock(&entry->lock);
if (lentry) {
ret = -EEXIST;
iput(inode);
fsnotify_put_mark(lentry);
} else {
__fsnotify_update_child_dentry_flags(inode);
}
return ret; return ret;
} }
......
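fsnotify_add_inode_mark() keeps each inode's mark list sorted by the owning group's address, which is exactly what allows fsnotify() to merge-walk the inode and vfsmount lists in one pass. The same insertion on a plain singly linked list, as a sketch (hypothetical types, no RCU, ascending order chosen for simplicity):

        #include <stdio.h>

        struct mark {
                struct mark *next;
                void *group;
        };

        static void insert_sorted(struct mark **head, struct mark *m)
        {
                struct mark **pos = head;

                /* skip marks whose group sorts before ours */
                while (*pos && (*pos)->group < m->group)
                        pos = &(*pos)->next;

                /* splice in before the first mark with a larger group */
                m->next = *pos;
                *pos = m;
        }

        int main(void)
        {
                struct mark a = { 0, (void *)0x30 }, b = { 0, (void *)0x10 };
                struct mark c = { 0, (void *)0x20 };
                struct mark *head = 0, *m;

                insert_sorted(&head, &a);
                insert_sorted(&head, &b);
                insert_sorted(&head, &c);
                for (m = head; m; m = m->next)
                        printf("%p\n", m->group);       /* 0x10, 0x20, 0x30 */
                return 0;
        }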
config INOTIFY
bool "Inotify file change notification support"
default n
---help---
Say Y here to enable legacy in kernel inotify support. Inotify is a
file change notification system. It is a replacement for dnotify.
This option only provides the legacy inotify in kernel API. There
are no in tree kernel users of this interface since it is deprecated.
You only need this if you are loading an out of tree kernel module
that uses inotify.
For more information, see <file:Documentation/filesystems/inotify.txt>
If unsure, say N.
config INOTIFY_USER config INOTIFY_USER
bool "Inotify support for userspace" bool "Inotify support for userspace"
select ANON_INODES select ANON_INODES
......
obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_INOTIFY_USER) += inotify_fsnotify.o inotify_user.o obj-$(CONFIG_INOTIFY_USER) += inotify_fsnotify.o inotify_user.o
This diff has been collapsed.
...@@ -9,13 +9,12 @@ struct inotify_event_private_data { ...@@ -9,13 +9,12 @@ struct inotify_event_private_data {
int wd; int wd;
}; };
struct inotify_inode_mark_entry { struct inotify_inode_mark {
/* fsnotify_mark_entry MUST be the first thing */ struct fsnotify_mark fsn_mark;
struct fsnotify_mark_entry fsn_entry;
int wd; int wd;
}; };
extern void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group); struct fsnotify_group *group);
extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv); extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
* General Public License for more details. * General Public License for more details.
*/ */
#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */ #include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h> #include <linux/fsnotify_backend.h>
#include <linux/inotify.h> #include <linux/inotify.h>
...@@ -32,26 +33,84 @@ ...@@ -32,26 +33,84 @@
#include "inotify.h" #include "inotify.h"
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event) /*
* Check if 2 events contain the same information. We do not compare private data
* but at this moment that isn't a problem for any known fsnotify listeners.
*/
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
if ((old->mask == new->mask) &&
(old->to_tell == new->to_tell) &&
(old->data_type == new->data_type) &&
(old->name_len == new->name_len)) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_INODE):
/* remember, after old was put on the wait_q we aren't
* allowed to look at the inode any more, only thing
* left to check was if the file_name is the same */
if (!old->name_len ||
!strcmp(old->file_name, new->file_name))
return true;
break;
case (FSNOTIFY_EVENT_FILE):
if ((old->file->f_path.mnt == new->file->f_path.mnt) &&
(old->file->f_path.dentry == new->file->f_path.dentry))
return true;
break;
case (FSNOTIFY_EVENT_NONE):
if (old->mask & FS_Q_OVERFLOW)
return true;
else if (old->mask & FS_IN_IGNORED)
return false;
return true;
};
}
return false;
}
static struct fsnotify_event *inotify_merge(struct list_head *list,
struct fsnotify_event *event)
{ {
struct fsnotify_mark_entry *entry; struct fsnotify_event_holder *last_holder;
struct inotify_inode_mark_entry *ientry; struct fsnotify_event *last_event;
/* and the list better be locked by something too */
spin_lock(&event->lock);
last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
last_event = last_holder->event;
if (event_compare(last_event, event))
fsnotify_get_event(last_event);
else
last_event = NULL;
spin_unlock(&event->lock);
return last_event;
}
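Because inotify_merge() compares a new event only against the tail of the notification list, identical back-to-back events coalesce into one. That is observable from user space: two quick writes to a watched file usually surface as a single IN_MODIFY. A demo sketch (error handling elided, "/tmp/f" is an example path, and the merge only happens if the reader has not drained the queue between the writes):

        #include <stdio.h>
        #include <unistd.h>
        #include <fcntl.h>
        #include <sys/inotify.h>

        int main(void)
        {
                char buf[4096] __attribute__((aligned(8)));
                int in = inotify_init();
                int fd = open("/tmp/f", O_CREAT | O_WRONLY, 0600);

                inotify_add_watch(in, "/tmp/f", IN_MODIFY);
                write(fd, "a", 1);
                write(fd, "b", 1);      /* compares equal to the queue tail: merged */

                printf("%zd bytes of events\n", read(in, buf, sizeof(buf)));
                return 0;
        }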
static int inotify_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
struct fsnotify_event *event)
{
struct inotify_inode_mark *i_mark;
struct inode *to_tell; struct inode *to_tell;
struct inotify_event_private_data *event_priv; struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv; struct fsnotify_event_private_data *fsn_event_priv;
int wd, ret; struct fsnotify_event *added_event;
int wd, ret = 0;
BUG_ON(vfsmount_mark);
pr_debug("%s: group=%p event=%p to_tell=%p mask=%x\n", __func__, group,
event, event->to_tell, event->mask);
to_tell = event->to_tell; to_tell = event->to_tell;
spin_lock(&to_tell->i_lock); i_mark = container_of(inode_mark, struct inotify_inode_mark,
entry = fsnotify_find_mark_entry(group, to_tell); fsn_mark);
spin_unlock(&to_tell->i_lock); wd = i_mark->wd;
/* race with watch removal? We already passed should_send */
if (unlikely(!entry))
return 0;
ientry = container_of(entry, struct inotify_inode_mark_entry,
fsn_entry);
wd = ientry->wd;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL); event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
if (unlikely(!event_priv)) if (unlikely(!event_priv))
...@@ -62,48 +121,40 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev ...@@ -62,48 +121,40 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
fsn_event_priv->group = group; fsn_event_priv->group = group;
event_priv->wd = wd; event_priv->wd = wd;
ret = fsnotify_add_notify_event(group, event, fsn_event_priv); added_event = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge);
if (ret) { if (added_event) {
inotify_free_event_priv(fsn_event_priv); inotify_free_event_priv(fsn_event_priv);
/* EEXIST says we tail matched, EOVERFLOW isn't something if (!IS_ERR(added_event))
* to report up the stack. */ fsnotify_put_event(added_event);
if ((ret == -EEXIST) || else
(ret == -EOVERFLOW)) ret = PTR_ERR(added_event);
ret = 0;
} }
/* if (inode_mark->mask & IN_ONESHOT)
* If we hold the entry until after the event is on the queue fsnotify_destroy_mark(inode_mark);
* IN_IGNORED won't be able to pass this event in the queue
*/
fsnotify_put_mark(entry);
return ret; return ret;
} }
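fsnotify_add_notify_event() now returns the event it merged with (or an ERR_PTR) instead of an errno, hence the IS_ERR()/PTR_ERR() handling above. The kernel's ERR_PTR convention packs a small negative errno into the top of the pointer range; a self-contained user-space model of that convention:

        #include <stdio.h>
        #include <errno.h>

        #define MAX_ERRNO 4095

        static void *ERR_PTR(long err)     { return (void *)err; }
        static long PTR_ERR(const void *p) { return (long)p; }
        static int IS_ERR(const void *p)
        {
                return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
        }

        int main(void)
        {
                void *p = ERR_PTR(-ENOMEM);

                if (IS_ERR(p))
                        printf("error %ld\n", PTR_ERR(p));      /* prints "error -12" */
                return 0;
        }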
static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group) static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{ {
inotify_ignored_and_remove_idr(entry, group); inotify_ignored_and_remove_idr(fsn_mark, group);
} }
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask) static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
__u32 mask, void *data, int data_type)
{ {
struct fsnotify_mark_entry *entry; if ((inode_mark->mask & FS_EXCL_UNLINK) &&
bool send; (data_type == FSNOTIFY_EVENT_FILE)) {
struct file *file = data;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (!entry)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD); if (d_unlinked(file->f_path.dentry))
send = (entry->mask & mask); return false;
}
/* find took a reference */
fsnotify_put_mark(entry);
return send; return true;
} }
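The new FS_EXCL_UNLINK test lets a mark opt out of events on files that have already been unlinked from the watched directory; user space requests this with IN_EXCL_UNLINK. A demo sketch (assumes a libc that defines IN_EXCL_UNLINK; paths are examples and error handling is elided):

        #include <unistd.h>
        #include <fcntl.h>
        #include <sys/inotify.h>

        int main(void)
        {
                int in = inotify_init();
                int fd = open("/tmp/d/f", O_CREAT | O_WRONLY, 0600);

                inotify_add_watch(in, "/tmp/d", IN_MODIFY | IN_EXCL_UNLINK);
                unlink("/tmp/d/f");
                write(fd, "x", 1);      /* no event: f is unlinked, d_unlinked() is true */
                return 0;
        }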
/* /*
...@@ -115,18 +166,18 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode ...@@ -115,18 +166,18 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
*/ */
static int idr_callback(int id, void *p, void *data) static int idr_callback(int id, void *p, void *data)
{ {
struct fsnotify_mark_entry *entry; struct fsnotify_mark *fsn_mark;
struct inotify_inode_mark_entry *ientry; struct inotify_inode_mark *i_mark;
static bool warned = false; static bool warned = false;
if (warned) if (warned)
return 0; return 0;
warned = true; warned = true;
entry = p; fsn_mark = p;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in " WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
"idr. Probably leaking memory\n", id, p, data); "idr. Probably leaking memory\n", id, p, data);
/* /*
...@@ -135,9 +186,9 @@ static int idr_callback(int id, void *p, void *data) ...@@ -135,9 +186,9 @@ static int idr_callback(int id, void *p, void *data)
* out why we got here and the panic is no worse than the original * out why we got here and the panic is no worse than the original
* BUG() that was here. * BUG() that was here.
*/ */
if (entry) if (fsn_mark)
printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n", printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
entry->group, entry->inode, ientry->wd); fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
return 0; return 0;
} }
......
...@@ -46,17 +46,11 @@ ...@@ -46,17 +46,11 @@
/* these are configurable via /proc/sys/fs/inotify/ */ /* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly; static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly; static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly; static int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly; static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly; struct kmem_cache *event_priv_cachep __read_mostly;
/*
* When inotify registers a new group it increments this and uses that
* value as an offset to set the fsnotify group "name" and priority.
*/
static atomic_t inotify_grp_num;
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
#include <linux/sysctl.h> #include <linux/sysctl.h>
...@@ -96,11 +90,14 @@ static inline __u32 inotify_arg_to_mask(u32 arg) ...@@ -96,11 +90,14 @@ static inline __u32 inotify_arg_to_mask(u32 arg)
{ {
__u32 mask; __u32 mask;
/* everything should accept their own ignored and cares about children */ /*
mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD); * everything should accept their own ignored, cares about children,
* and should receive events when the inode is unmounted
*/
mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* mask off the flags used to open the fd */ /* mask off the flags used to open the fd */
mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT)); mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
return mask; return mask;
} }
...@@ -144,6 +141,8 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group, ...@@ -144,6 +141,8 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
event = fsnotify_peek_notify_event(group); event = fsnotify_peek_notify_event(group);
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
if (event->name_len) if (event->name_len)
event_size += roundup(event->name_len + 1, event_size); event_size += roundup(event->name_len + 1, event_size);
...@@ -173,6 +172,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, ...@@ -173,6 +172,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
size_t event_size = sizeof(struct inotify_event); size_t event_size = sizeof(struct inotify_event);
size_t name_len = 0; size_t name_len = 0;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
/* we get the inotify watch descriptor from the event private data */ /* we get the inotify watch descriptor from the event private data */
spin_lock(&event->lock); spin_lock(&event->lock);
fsn_priv = fsnotify_remove_priv_from_event(group, event); fsn_priv = fsnotify_remove_priv_from_event(group, event);
...@@ -245,6 +246,8 @@ static ssize_t inotify_read(struct file *file, char __user *buf, ...@@ -245,6 +246,8 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
kevent = get_one_event(group, count); kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex); mutex_unlock(&group->notification_mutex);
pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
if (kevent) { if (kevent) {
ret = PTR_ERR(kevent); ret = PTR_ERR(kevent);
if (IS_ERR(kevent)) if (IS_ERR(kevent))
...@@ -289,6 +292,8 @@ static int inotify_release(struct inode *ignored, struct file *file) ...@@ -289,6 +292,8 @@ static int inotify_release(struct inode *ignored, struct file *file)
struct fsnotify_group *group = file->private_data; struct fsnotify_group *group = file->private_data;
struct user_struct *user = group->inotify_data.user; struct user_struct *user = group->inotify_data.user;
pr_debug("%s: group=%p\n", __func__, group);
fsnotify_clear_marks_by_group(group); fsnotify_clear_marks_by_group(group);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */ /* free this group, matching get was inotify_init->fsnotify_obtain_group */
...@@ -312,6 +317,8 @@ static long inotify_ioctl(struct file *file, unsigned int cmd, ...@@ -312,6 +317,8 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
group = file->private_data; group = file->private_data;
p = (void __user *) arg; p = (void __user *) arg;
pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
switch (cmd) { switch (cmd) {
case FIONREAD: case FIONREAD:
mutex_lock(&group->notification_mutex); mutex_lock(&group->notification_mutex);
...@@ -357,59 +364,159 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns ...@@ -357,59 +364,159 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error; return error;
} }
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
int *last_wd,
struct inotify_inode_mark *i_mark)
{
int ret;
do {
if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
return -ENOMEM;
spin_lock(idr_lock);
ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
&i_mark->wd);
/* we added the mark to the idr, take a reference */
if (!ret) {
*last_wd = i_mark->wd;
fsnotify_get_mark(&i_mark->fsn_mark);
}
spin_unlock(idr_lock);
} while (ret == -EAGAIN);
return ret;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
int wd)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *i_mark;
assert_spin_locked(idr_lock);
i_mark = idr_find(idr, wd);
if (i_mark) {
struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
fsnotify_get_mark(fsn_mark);
/* One ref for being in the idr, one ref we just took */
BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
}
return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
int wd)
{
struct inotify_inode_mark *i_mark;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
spin_lock(idr_lock);
i_mark = inotify_idr_find_locked(group, wd);
spin_unlock(idr_lock);
return i_mark;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
int wd = i_mark->wd;
assert_spin_locked(idr_lock);
idr_remove(idr, wd);
/* removed from the idr, drop that ref */
fsnotify_put_mark(&i_mark->fsn_mark);
}
/* /*
* Remove the mark from the idr (if present) and drop the reference * Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr. * on the mark because it was in the idr.
*/ */
static void inotify_remove_from_idr(struct fsnotify_group *group, static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark_entry *ientry) struct inotify_inode_mark *i_mark)
{ {
struct idr *idr; spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct fsnotify_mark_entry *entry; struct inotify_inode_mark *found_i_mark = NULL;
struct inotify_inode_mark_entry *found_ientry;
int wd; int wd;
spin_lock(&group->inotify_data.idr_lock); spin_lock(idr_lock);
idr = &group->inotify_data.idr; wd = i_mark->wd;
wd = ientry->wd;
if (wd == -1) /*
* does this i_mark think it is in the idr? we shouldn't get called
* if it wasn't....
*/
if (wd == -1) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out; goto out;
}
entry = idr_find(&group->inotify_data.idr, wd); /* Lets look in the idr to see if we find it */
if (unlikely(!entry)) found_i_mark = inotify_idr_find_locked(group, wd);
if (unlikely(!found_i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out; goto out;
}
found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); /*
if (unlikely(found_ientry != ientry)) { * We found a mark in the idr at the right wd, but it's
/* We found an entry in the idr with the right wd, but it's * not the mark we were told to remove. eparis seriously
* not the entry we were told to remove. eparis seriously * fucked up somewhere.
* fucked up somewhere. */ */
WARN_ON(1); if (unlikely(found_i_mark != i_mark)) {
ientry->wd = -1; WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
"found_i_mark->group=%p found_i_mark->inode=%p\n",
__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
found_i_mark->fsn_mark.group,
found_i_mark->fsn_mark.i.inode);
goto out; goto out;
} }
/* One ref for being in the idr, one ref held by the caller */ /*
BUG_ON(atomic_read(&entry->refcnt) < 2); * One ref for being in the idr
* one ref held by the caller trying to kill us
idr_remove(idr, wd); * one ref grabbed by inotify_idr_find
ientry->wd = -1; */
if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
/* we can't really recover with bad ref cnting.. */
BUG();
}
/* removed from the idr, drop that ref */ do_inotify_remove_from_idr(group, i_mark);
fsnotify_put_mark(entry);
out: out:
spin_unlock(&group->inotify_data.idr_lock); /* match the ref taken by inotify_idr_find_locked() */
if (found_i_mark)
fsnotify_put_mark(&found_i_mark->fsn_mark);
i_mark->wd = -1;
spin_unlock(idr_lock);
} }
/* /*
* Send IN_IGNORED for this wd, remove this wd from the idr. * Send IN_IGNORED for this wd, remove this wd from the idr.
*/ */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group) struct fsnotify_group *group)
{ {
struct inotify_inode_mark_entry *ientry; struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event; struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv; struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv; struct fsnotify_event_private_data *fsn_event_priv;
int ret; int ret;
...@@ -420,7 +527,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, ...@@ -420,7 +527,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
if (!ignored_event) if (!ignored_event)
return; return;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS); event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv)) if (unlikely(!event_priv))
...@@ -429,37 +536,44 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, ...@@ -429,37 +536,44 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
fsn_event_priv = &event_priv->fsnotify_event_priv_data; fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group; fsn_event_priv->group = group;
event_priv->wd = ientry->wd; event_priv->wd = i_mark->wd;
ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv); notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (ret) if (notify_event) {
if (IS_ERR(notify_event))
ret = PTR_ERR(notify_event);
else
fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv); inotify_free_event_priv(fsn_event_priv);
}
skip_send_ignore: skip_send_ignore:
/* matches the reference taken when the event was created */ /* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event); fsnotify_put_event(ignored_event);
/* remove this entry from the idr */ /* remove this mark from the idr */
inotify_remove_from_idr(group, ientry); inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches); atomic_dec(&group->inotify_data.user->inotify_watches);
} }
/* ding dong the mark is dead */ /* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry) static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{ {
struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry; struct inotify_inode_mark *i_mark;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
kmem_cache_free(inotify_inode_mark_cachep, ientry); kmem_cache_free(inotify_inode_mark_cachep, i_mark);
} }
static int inotify_update_existing_watch(struct fsnotify_group *group, static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode, struct inode *inode,
u32 arg) u32 arg)
{ {
struct fsnotify_mark_entry *entry; struct fsnotify_mark *fsn_mark;
struct inotify_inode_mark_entry *ientry; struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask; __u32 old_mask, new_mask;
__u32 mask; __u32 mask;
int add = (arg & IN_MASK_ADD); int add = (arg & IN_MASK_ADD);
...@@ -467,52 +581,43 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, ...@@ -467,52 +581,43 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
/* don't allow invalid bits: we don't want flags set */ /* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg); mask = inotify_arg_to_mask(arg);
if (unlikely(!mask)) if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL; return -EINVAL;
spin_lock(&inode->i_lock); fsn_mark = fsnotify_find_inode_mark(group, inode);
entry = fsnotify_find_mark_entry(group, inode); if (!fsn_mark)
spin_unlock(&inode->i_lock);
if (!entry)
return -ENOENT; return -ENOENT;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
spin_lock(&entry->lock); spin_lock(&fsn_mark->lock);
old_mask = entry->mask; old_mask = fsn_mark->mask;
if (add) { if (add)
entry->mask |= mask; fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
new_mask = entry->mask; else
} else { fsnotify_set_mark_mask_locked(fsn_mark, mask);
entry->mask = mask; new_mask = fsn_mark->mask;
new_mask = entry->mask;
}
spin_unlock(&entry->lock); spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) { if (old_mask != new_mask) {
/* more bits in old than in new? */ /* more bits in old than in new? */
int dropped = (old_mask & ~new_mask); int dropped = (old_mask & ~new_mask);
/* more bits in this entry than the inode's mask? */ /* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask); int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* more bits in this entry than the group? */
int do_group = (new_mask & ~group->mask);
/* update the inode with this new entry */ /* update the inode with this new fsn_mark */
if (dropped || do_inode) if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode); fsnotify_recalc_inode_mask(inode);
/* update the group mask with the new mask */
if (dropped || do_group)
fsnotify_recalc_group_mask(group);
} }
/* return the wd */ /* return the wd */
ret = ientry->wd; ret = i_mark->wd;
/* match the get from fsnotify_find_mark_entry() */ /* match the get from fsnotify_find_mark() */
fsnotify_put_mark(entry); fsnotify_put_mark(fsn_mark);
return ret; return ret;
} }
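inotify_update_existing_watch() is the kernel side of IN_MASK_ADD: with the flag set, the new bits are OR-ed into the existing mark instead of replacing it, and the caller gets back the same watch descriptor. From user space (error handling elided; "/tmp" is an example path):

        #include <stdio.h>
        #include <sys/inotify.h>

        int main(void)
        {
                int in = inotify_init();
                int wd1 = inotify_add_watch(in, "/tmp", IN_CREATE);
                int wd2 = inotify_add_watch(in, "/tmp", IN_DELETE | IN_MASK_ADD);

                /* same watch: it now reports both IN_CREATE and IN_DELETE */
                printf("wd1=%d wd2=%d same=%d\n", wd1, wd2, wd1 == wd2);
                return 0;
        }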
@@ -521,73 +626,51 @@ static int inotify_new_watch(struct fsnotify_group *group,
 			     struct inode *inode,
 			     u32 arg)
 {
-	struct inotify_inode_mark_entry *tmp_ientry;
+	struct inotify_inode_mark *tmp_i_mark;
 	__u32 mask;
 	int ret;
+	struct idr *idr = &group->inotify_data.idr;
+	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

 	/* don't allow invalid bits: we don't want flags set */
 	mask = inotify_arg_to_mask(arg);
-	if (unlikely(!mask))
+	if (unlikely(!(mask & IN_ALL_EVENTS)))
 		return -EINVAL;

-	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!tmp_ientry))
+	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_i_mark))
 		return -ENOMEM;

-	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
-	tmp_ientry->fsn_entry.mask = mask;
-	tmp_ientry->wd = -1;
+	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
+	tmp_i_mark->fsn_mark.mask = mask;
+	tmp_i_mark->wd = -1;

 	ret = -ENOSPC;
 	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
 		goto out_err;

-retry:
-	ret = -ENOMEM;
-	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
-		goto out_err;
-
-	/* we are putting the mark on the idr, take a reference */
-	fsnotify_get_mark(&tmp_ientry->fsn_entry);
-
-	spin_lock(&group->inotify_data.idr_lock);
-	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
-				group->inotify_data.last_wd+1,
-				&tmp_ientry->wd);
-	spin_unlock(&group->inotify_data.idr_lock);
-	if (ret) {
-		/* we didn't get on the idr, drop the idr reference */
-		fsnotify_put_mark(&tmp_ientry->fsn_entry);
-
-		/* idr was out of memory allocate and try again */
-		if (ret == -EAGAIN)
-			goto retry;
+	ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
+				 tmp_i_mark);
+	if (ret)
 		goto out_err;
-	}

 	/* we are on the idr, now get on the inode */
-	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+	ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
 	if (ret) {
 		/* we failed to get on the inode, get off the idr */
-		inotify_remove_from_idr(group, tmp_ientry);
+		inotify_remove_from_idr(group, tmp_i_mark);
 		goto out_err;
 	}

-	/* update the idr hint, who cares about races, it's just a hint */
-	group->inotify_data.last_wd = tmp_ientry->wd;
-
 	/* increment the number of watches the user has */
 	atomic_inc(&group->inotify_data.user->inotify_watches);

-	/* return the watch descriptor for this new entry */
-	ret = tmp_ientry->wd;
-
-	/* if this mark added a new event update the group mask */
-	if (mask & ~group->mask)
-		fsnotify_recalc_group_mask(group);
+	/* return the watch descriptor for this new mark */
+	ret = tmp_i_mark->wd;

 out_err:
-	/* match the ref from fsnotify_init_markentry() */
-	fsnotify_put_mark(&tmp_ientry->fsn_entry);
+	/* match the ref from fsnotify_init_mark() */
+	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

 	return ret;
 }
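The open-coded retry loop that used to live in this function moved into the new inotify_add_to_idr() helper. Judging only from the call site above, its body is presumably close to the following sketch (the exact error handling is an assumption, not taken from this diff):

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      int *last_wd,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	do {
		/* pre-allocate idr memory outside the spinlock */
		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
			return -ENOMEM;

		spin_lock(idr_lock);
		ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
					&i_mark->wd);
		if (!ret) {
			/* the idr now points at the mark, take a reference */
			fsnotify_get_mark(&i_mark->fsn_mark);
			/* update the wd hint; a racy hint is still a hint */
			*last_wd = i_mark->wd;
		}
		spin_unlock(idr_lock);
	} while (ret == -EAGAIN);	/* idr needed more memory, retry */

	return ret;
}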
@@ -616,11 +699,8 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
 {
 	struct fsnotify_group *group;
-	unsigned int grp_num;

-	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
-	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
-	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
+	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
 	if (IS_ERR(group))
 		return group;
@@ -726,7 +806,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 {
 	struct fsnotify_group *group;
-	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark *i_mark;
 	struct file *filp;
 	int ret = 0, fput_needed;

@@ -735,25 +815,23 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 		return -EBADF;

 	/* verify that this is indeed an inotify instance */
-	if (unlikely(filp->f_op != &inotify_fops)) {
-		ret = -EINVAL;
+	ret = -EINVAL;
+	if (unlikely(filp->f_op != &inotify_fops))
 		goto out;
-	}

 	group = filp->private_data;

-	spin_lock(&group->inotify_data.idr_lock);
-	entry = idr_find(&group->inotify_data.idr, wd);
-	if (unlikely(!entry)) {
-		spin_unlock(&group->inotify_data.idr_lock);
-		ret = -EINVAL;
+	ret = -EINVAL;
+	i_mark = inotify_idr_find(group, wd);
+	if (unlikely(!i_mark))
 		goto out;
-	}
-	fsnotify_get_mark(entry);
-	spin_unlock(&group->inotify_data.idr_lock);

-	fsnotify_destroy_mark_by_entry(entry);
-	fsnotify_put_mark(entry);
+	ret = 0;
+
+	fsnotify_destroy_mark(&i_mark->fsn_mark);
+
+	/* match ref taken by inotify_idr_find */
+	fsnotify_put_mark(&i_mark->fsn_mark);

 out:
 	fput_light(filp, fput_needed);
@@ -767,7 +845,28 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
  */
 static int __init inotify_user_setup(void)
 {
-	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
+	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
+	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
+	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
+	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
+	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
+	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
+	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
+	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
+	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
+	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
+	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
+	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
+	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
+	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
+	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
+	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
+	BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
+	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
+
+	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
+
+	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

 	inotify_max_queued_events = 16384;
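The BUILD_BUG_ON() wall above turns the IN_*/FS_* equivalences into compile-time checks, which is what lets inotify pass userspace masks through to fsnotify unconverted. One classic kernel definition of the macro (roughly; the exact form varies by version) is:

/* fails to compile when condition is true: the array size goes negative */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))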
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* fsnotify inode mark locking, lifetime, and refcounting
*
* REFCNT:
* The mark->refcnt tells how many "things" in the kernel currently are
* referencing this object. The object typically will live inside the kernel
* with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
* which can find this object, holding the appropriate locks, can take a reference
* and the object itself is guaranteed to survive until the reference is dropped.
*
* LOCKING:
* There are 3 spinlocks involved with fsnotify inode marks and they MUST
* be taken in order as follows:
*
* mark->lock
* group->mark_lock
* inode->i_lock
*
* mark->lock protects 2 things, mark->group and mark->inode. You must hold
* that lock to dereference either of these things (they could be NULL even with
* the lock)
*
* group->mark_lock protects the marks_list anchored inside a given group
* and each mark is hooked via the g_list. It also sorta protects the
* free_g_list, which when used is anchored by a private list on the stack of the
* task which held the group->mark_lock.
*
* inode->i_lock protects the i_fsnotify_marks list anchored inside a
* given inode and each mark is hooked via the i_list. (and sorta the
* free_i_list)
*
*
* LIFETIME:
* Inode marks survive between when they are added to an inode and when their
* refcnt==0.
*
* The inode mark can be cleared for a number of different reasons including:
* - The inode is unlinked for the last time. (fsnotify_inode_remove)
* - The inode is being evicted from cache. (fsnotify_inode_delete)
* - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
* - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
* - The fsnotify_group associated with the mark is going away and all such marks
* need to be cleaned up. (fsnotify_clear_marks_by_group)
*
* Worst case we are given an inode and need to clean up all the marks on that
* inode. We take i_lock and walk the i_fsnotify_marks safely. For each
* mark on the list we take a reference (so the mark can't disappear under us).
* We remove that mark from the inode's list of marks and we add this mark to a
* private list anchored on the stack using i_free_list; At this point we no
* longer fear anything finding the mark using the inode's list of marks.
*
* We can safely and locklessly run the private list on the stack of everything
* we just detached from the original inode. For each mark on the private list
* we grab the mark->lock and can thus dereference mark->group and mark->inode. If
* we see the group and inode are not NULL we take those locks. Now holding all
* 3 locks we can completely remove the mark from other tasks finding it in the
* future. Remember, 10 things might already be referencing this mark, but they
* better be holding a ref. We drop the reference we took before we unhooked it
* from the inode. When the ref hits 0 we can free the mark.
*
* Very similarly for freeing by group, except we use free_g_list.
*
* This has the very interesting property of being able to run concurrently with
* any (or all) of the other teardown paths.
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
struct srcu_struct fsnotify_mark_srcu;
static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
atomic_inc(&mark->refcnt);
}
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
if (atomic_dec_and_test(&mark->refcnt))
mark->free_mark(mark);
}
/*
* Any time a mark is getting freed we end up here.
* The caller had better be holding a reference to this mark so we don't actually
* do the final put under the mark->lock
*/
void fsnotify_destroy_mark(struct fsnotify_mark *mark)
{
struct fsnotify_group *group;
struct inode *inode = NULL;
spin_lock(&mark->lock);
group = mark->group;
/* something else already called this function on this mark */
if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
spin_unlock(&mark->lock);
return;
}
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
/* 1 from caller and 1 for being on i_list/g_list */
BUG_ON(atomic_read(&mark->refcnt) < 2);
spin_lock(&group->mark_lock);
if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
inode = mark->i.inode;
fsnotify_destroy_inode_mark(mark);
} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
fsnotify_destroy_vfsmount_mark(mark);
else
BUG();
list_del_init(&mark->g_list);
spin_unlock(&group->mark_lock);
spin_unlock(&mark->lock);
spin_lock(&destroy_lock);
list_add(&mark->destroy_list, &destroy_list);
spin_unlock(&destroy_lock);
wake_up(&destroy_waitq);
/*
* Some groups like to know that marks are being freed. This is a
* callback to the group function to let it know that this mark
* is being freed.
*/
if (group->ops->freeing_mark)
group->ops->freeing_mark(mark, group);
/*
* __fsnotify_update_child_dentry_flags(inode);
*
* I really want to call that, but we can't, we have no idea if the inode
* still exists the second we drop the mark->lock.
*
* The next time an event arrives at this inode from one of its children
* __fsnotify_parent will see that the inode doesn't care about its
* children and will update all of these flags then. So really this
* is just a lazy update (and could be a perf win...)
*/
if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
iput(inode);
/*
* it's possible that this group tried to destroy itself, but this
* mark was simultaneously being freed by the inode. If that's the
* case, we finish freeing the group here.
*/
if (unlikely(atomic_dec_and_test(&group->num_marks)))
fsnotify_final_destroy_group(group);
}
void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
assert_spin_locked(&mark->lock);
mark->mask = mask;
if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
fsnotify_set_inode_mark_mask_locked(mark, mask);
}
void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
assert_spin_locked(&mark->lock);
mark->ignored_mask = mask;
}
/*
* Attach an initialized mark to a given group and fs object.
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group.
*/
int fsnotify_add_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
struct vfsmount *mnt, int allow_dups)
{
int ret = 0;
BUG_ON(inode && mnt);
BUG_ON(!inode && !mnt);
/*
* LOCKING ORDER!!!!
* mark->lock
* group->mark_lock
* inode->i_lock
*/
spin_lock(&mark->lock);
spin_lock(&group->mark_lock);
mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
mark->group = group;
list_add(&mark->g_list, &group->marks_list);
atomic_inc(&group->num_marks);
fsnotify_get_mark(mark); /* for i_list and g_list */
if (inode) {
ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
if (ret)
goto err;
} else if (mnt) {
ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
if (ret)
goto err;
} else {
BUG();
}
spin_unlock(&group->mark_lock);
/* this will pin the object if appropriate */
fsnotify_set_mark_mask_locked(mark, mark->mask);
spin_unlock(&mark->lock);
if (inode)
__fsnotify_update_child_dentry_flags(inode);
return ret;
err:
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
list_del_init(&mark->g_list);
mark->group = NULL;
atomic_dec(&group->num_marks);
spin_unlock(&group->mark_lock);
spin_unlock(&mark->lock);
spin_lock(&destroy_lock);
list_add(&mark->destroy_list, &destroy_list);
spin_unlock(&destroy_lock);
wake_up(&destroy_waitq);
return ret;
}
/*
* clear any marks in a group in which mark->flags & flags is true
*/
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
unsigned int flags)
{
struct fsnotify_mark *lmark, *mark;
LIST_HEAD(free_list);
spin_lock(&group->mark_lock);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
if (mark->flags & flags) {
list_add(&mark->free_g_list, &free_list);
list_del_init(&mark->g_list);
fsnotify_get_mark(mark);
}
}
spin_unlock(&group->mark_lock);
list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
fsnotify_destroy_mark(mark);
fsnotify_put_mark(mark);
}
}
/*
* Given a group, destroy all of the marks associated with that group.
*/
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
}
void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
assert_spin_locked(&old->lock);
new->i.inode = old->i.inode;
new->m.mnt = old->m.mnt;
new->group = old->group;
new->mask = old->mask;
new->free_mark = old->free_mark;
}
/*
* Nothing fancy, just initialize lists and locks and counters.
*/
void fsnotify_init_mark(struct fsnotify_mark *mark,
void (*free_mark)(struct fsnotify_mark *mark))
{
memset(mark, 0, sizeof(*mark));
spin_lock_init(&mark->lock);
atomic_set(&mark->refcnt, 1);
mark->free_mark = free_mark;
}
static int fsnotify_mark_destroy(void *ignored)
{
struct fsnotify_mark *mark, *next;
LIST_HEAD(private_destroy_list);
for (;;) {
spin_lock(&destroy_lock);
/* exchange the list head */
list_replace_init(&destroy_list, &private_destroy_list);
spin_unlock(&destroy_lock);
synchronize_srcu(&fsnotify_mark_srcu);
list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
list_del_init(&mark->destroy_list);
fsnotify_put_mark(mark);
}
wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
}
return 0;
}
static int __init fsnotify_mark_init(void)
{
struct task_struct *thread;
thread = kthread_run(fsnotify_mark_destroy, NULL,
"fsnotify_mark");
if (IS_ERR(thread))
panic("unable to start fsnotify mark destruction thread.");
return 0;
}
device_initcall(fsnotify_mark_init);
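Taken together with the lifetime rules in the header comment, a backend drives this API roughly as follows. This is a hedged sketch, not code from the patch; my_free_mark() and my_watch_inode() are hypothetical backend glue:

static void my_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

static int my_watch_inode(struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark *mark;
	int ret;

	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark)
		return -ENOMEM;

	/* refcnt starts at 1, owned by this function */
	fsnotify_init_mark(mark, my_free_mark);
	mark->mask = FS_CREATE | FS_DELETE;

	/* on success the i_list/g_list hold their own reference */
	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);

	/* match the ref from fsnotify_init_mark(); list refs keep it alive */
	fsnotify_put_mark(mark);
	return ret;
}

Tearing the watch down later is just fsnotify_destroy_mark(), which unhooks the mark everywhere and defers the final free to the fsnotify_mark destruction thread after an SRCU grace period.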
@@ -31,6 +31,7 @@
  * allocated and used.
  */

+#include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -56,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
  * it is needed.  It's refcnt is set 1 at kernel init time and will never
  * get set to 0 so it will never get 'freed'
  */
-static struct fsnotify_event q_overflow_event;
+static struct fsnotify_event *q_overflow_event;
 static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

 /**
@@ -87,12 +88,15 @@ void fsnotify_put_event(struct fsnotify_event *event)
 		return;

 	if (atomic_dec_and_test(&event->refcnt)) {
-		if (event->data_type == FSNOTIFY_EVENT_PATH)
-			path_put(&event->path);
+		pr_debug("%s: event=%p\n", __func__, event);
+
+		if (event->data_type == FSNOTIFY_EVENT_FILE)
+			fput(event->file);

 		BUG_ON(!list_empty(&event->private_data_list));

 		kfree(event->file_name);
+		put_pid(event->tgid);
 		kmem_cache_free(fsnotify_event_cachep, event);
 	}
@@ -104,7 +108,8 @@ struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
 void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
 {
-	kmem_cache_free(fsnotify_event_holder_cachep, holder);
+	if (holder)
+		kmem_cache_free(fsnotify_event_holder_cachep, holder);
 }

 /*
@@ -128,54 +133,21 @@ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnot
 	return priv;
 }

-/*
- * Check if 2 events contain the same information. We do not compare private data
- * but at this moment that isn't a problem for any know fsnotify listeners.
- */
-static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
-{
-	if ((old->mask == new->mask) &&
-	    (old->to_tell == new->to_tell) &&
-	    (old->data_type == new->data_type) &&
-	    (old->name_len == new->name_len)) {
-		switch (old->data_type) {
-		case (FSNOTIFY_EVENT_INODE):
-			/* remember, after old was put on the wait_q we aren't
-			 * allowed to look at the inode any more, only thing
-			 * left to check was if the file_name is the same */
-			if (!old->name_len ||
-			    !strcmp(old->file_name, new->file_name))
-				return true;
-			break;
-		case (FSNOTIFY_EVENT_PATH):
-			if ((old->path.mnt == new->path.mnt) &&
-			    (old->path.dentry == new->path.dentry))
-				return true;
-			break;
-		case (FSNOTIFY_EVENT_NONE):
-			if (old->mask & FS_Q_OVERFLOW)
-				return true;
-			else if (old->mask & FS_IN_IGNORED)
-				return false;
-			return false;
-		};
-	}
-	return false;
-}
-
 /*
  * Add an event to the group notification queue.  The group can later pull this
  * event off the queue to deal with.  If the event is successfully added to the
  * group's notification queue, a reference is taken on event.
  */
-int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
-			      struct fsnotify_event_private_data *priv)
+struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
+						 struct fsnotify_event_private_data *priv,
+						 struct fsnotify_event *(*merge)(struct list_head *,
+										 struct fsnotify_event *))
 {
+	struct fsnotify_event *return_event = NULL;
 	struct fsnotify_event_holder *holder = NULL;
 	struct list_head *list = &group->notification_list;
-	struct fsnotify_event_holder *last_holder;
-	struct fsnotify_event *last_event;
-	int ret = 0;
+
+	pr_debug("%s: group=%p event=%p priv=%p\n", __func__, group, event, priv);

 	/*
 	 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
@@ -189,18 +161,40 @@ int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_even
 alloc_holder:
 		holder = fsnotify_alloc_event_holder();
 		if (!holder)
-			return -ENOMEM;
+			return ERR_PTR(-ENOMEM);
 	}

 	mutex_lock(&group->notification_mutex);

 	if (group->q_len >= group->max_events) {
-		event = &q_overflow_event;
-		ret = -EOVERFLOW;
+		event = q_overflow_event;
+
+		/*
+		 * we need to return the overflow event
+		 * which means we need a ref
+		 */
+		fsnotify_get_event(event);
+		return_event = event;
+
 		/* sorry, no private data on the overflow event */
 		priv = NULL;
 	}

+	if (!list_empty(list) && merge) {
+		struct fsnotify_event *tmp;
+
+		tmp = merge(list, event);
+		if (tmp) {
+			mutex_unlock(&group->notification_mutex);
+
+			if (return_event)
+				fsnotify_put_event(return_event);
+			if (holder != &event->holder)
+				fsnotify_destroy_event_holder(holder);
+			return tmp;
+		}
+	}
+
 	spin_lock(&event->lock);

 	if (list_empty(&event->holder.event_list)) {
@@ -212,19 +206,13 @@ int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_even
 		 * event holder was used, go back and get a new one */
 		spin_unlock(&event->lock);
 		mutex_unlock(&group->notification_mutex);
-		goto alloc_holder;
-	}

-	if (!list_empty(list)) {
-		last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
-		last_event = last_holder->event;
-		if (event_compare(last_event, event)) {
-			spin_unlock(&event->lock);
-			mutex_unlock(&group->notification_mutex);
-
-			if (holder != &event->holder)
-				fsnotify_destroy_event_holder(holder);
-			return -EEXIST;
+		if (return_event) {
+			fsnotify_put_event(return_event);
+			return_event = NULL;
 		}
+
+		goto alloc_holder;
 	}

 	group->q_len++;
@@ -238,7 +226,7 @@ int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_even
 	mutex_unlock(&group->notification_mutex);
 	wake_up(&group->notification_waitq);

-	return ret;
+	return return_event;
 }
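fsnotify_add_notify_event() now delegates coalescing to the caller through the new merge callback: called under notification_mutex, it returns NULL to queue the new event, or a referenced event that the caller hands back up. A minimal sketch of such a callback, modeled on inotify's tail-compare (the my_ prefixed names are hypothetical, including the my_event_compare() helper):

static struct fsnotify_event *my_merge(struct list_head *list,
				       struct fsnotify_event *event)
{
	struct fsnotify_event_holder *last_holder;
	struct fsnotify_event *last_event;

	/* the caller holds group->notification_mutex, so list->prev is stable */
	last_holder = list_entry(list->prev, struct fsnotify_event_holder,
				 event_list);
	last_event = last_holder->event;

	if (!my_event_compare(last_event, event))
		return NULL;	/* nothing merged; queue the new event */

	/* duplicate of the tail: return the tail with a reference held */
	fsnotify_get_event(last_event);
	return last_event;
}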
 /*
@@ -253,6 +241,8 @@ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group
 	BUG_ON(!mutex_is_locked(&group->notification_mutex));

+	pr_debug("%s: group=%p\n", __func__, group);
+
 	holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);

 	event = holder->event;
@@ -314,25 +304,82 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
 static void initialize_event(struct fsnotify_event *event)
 {
-	event->holder.event = NULL;
 	INIT_LIST_HEAD(&event->holder.event_list);
 	atomic_set(&event->refcnt, 1);
 	spin_lock_init(&event->lock);

-	event->path.dentry = NULL;
-	event->path.mnt = NULL;
-	event->inode = NULL;
-	event->data_type = FSNOTIFY_EVENT_NONE;
-
 	INIT_LIST_HEAD(&event->private_data_list);
+}

-	event->to_tell = NULL;
+/*
+ * Caller damn well better be holding whatever mutex is protecting the
+ * old_holder->event_list and the new_event must be a clean event which
+ * cannot be found anywhere else in the kernel.
+ */
+int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
+			   struct fsnotify_event *new_event)
+{
+	struct fsnotify_event *old_event = old_holder->event;
+	struct fsnotify_event_holder *new_holder = &new_event->holder;

-	event->file_name = NULL;
-	event->name_len = 0;
+	enum event_spinlock_class {
+		SPINLOCK_OLD,
+		SPINLOCK_NEW,
+	};

-	event->sync_cookie = 0;
+	pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, new_event);
+
+	/*
+	 * if the new_event's embedded holder is in use someone
+	 * screwed up and didn't give us a clean new event.
+	 */
+	BUG_ON(!list_empty(&new_holder->event_list));
+
+	spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
+	spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
+
+	new_holder->event = new_event;
+	list_replace_init(&old_holder->event_list, &new_holder->event_list);
+
+	spin_unlock(&new_event->lock);
+	spin_unlock(&old_event->lock);
+
+	/* event == holder means we are referenced through the in event holder */
+	if (old_holder != &old_event->holder)
+		fsnotify_destroy_event_holder(old_holder);
+
+	fsnotify_get_event(new_event); /* on the list take reference */
+	fsnotify_put_event(old_event); /* off the list, drop reference */
+
+	return 0;
+}
+
+struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
+{
+	struct fsnotify_event *event;
+
+	event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+	if (!event)
+		return NULL;
+
+	pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, event);
+
+	memcpy(event, old_event, sizeof(*event));
+	initialize_event(event);
+
+	if (event->name_len) {
+		event->file_name = kstrdup(old_event->file_name, GFP_KERNEL);
+		if (!event->file_name) {
+			kmem_cache_free(fsnotify_event_cachep, event);
+			return NULL;
+		}
+	}
+	event->tgid = get_pid(old_event->tgid);
+	if (event->data_type == FSNOTIFY_EVENT_FILE)
+		get_file(event->file);
+
+	return event;
 }
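fsnotify_clone_event() and fsnotify_replace_event() are meant to be used together from a merge callback: clone the queued tail, widen the copy's mask, and splice it in where the original sat. A hedged sketch of that pattern (merge_into_tail() is a hypothetical helper; fanotify's merge works along these lines):

static struct fsnotify_event *merge_into_tail(struct fsnotify_event_holder *tail_holder,
					      struct fsnotify_event *event)
{
	struct fsnotify_event *old = tail_holder->event;
	struct fsnotify_event *new;

	new = fsnotify_clone_event(old);
	if (!new)
		return NULL;

	/* fold the new event's bits into the private copy ... */
	new->mask |= event->mask;
	/* ... and swap the copy in; list references are handled inside */
	fsnotify_replace_event(tail_holder, new);

	/* hand the caller the reference we got from fsnotify_clone_event() */
	return new;
}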
 /*
@@ -348,15 +395,18 @@ static void initialize_event(struct fsnotify_event *event)
  * @name the filename, if available
  */
 struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
-					     int data_type, const char *name, u32 cookie,
-					     gfp_t gfp)
+					     int data_type, const unsigned char *name,
+					     u32 cookie, gfp_t gfp)
 {
 	struct fsnotify_event *event;

-	event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
+	event = kmem_cache_zalloc(fsnotify_event_cachep, gfp);
 	if (!event)
 		return NULL;

+	pr_debug("%s: event=%p to_tell=%p mask=%x data=%p data_type=%d\n",
+		 __func__, event, to_tell, mask, data, data_type);
+
 	initialize_event(event);

 	if (name) {
@@ -368,35 +418,36 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
 		event->name_len = strlen(event->file_name);
 	}

+	event->tgid = get_pid(task_tgid(current));
 	event->sync_cookie = cookie;
 	event->to_tell = to_tell;
+	event->data_type = data_type;

 	switch (data_type) {
 	case FSNOTIFY_EVENT_FILE: {
-		struct file *file = data;
-		struct path *path = &file->f_path;
-		event->path.dentry = path->dentry;
-		event->path.mnt = path->mnt;
-		path_get(&event->path);
-		event->data_type = FSNOTIFY_EVENT_PATH;
-		break;
-	}
-	case FSNOTIFY_EVENT_PATH: {
-		struct path *path = data;
-		event->path.dentry = path->dentry;
-		event->path.mnt = path->mnt;
-		path_get(&event->path);
-		event->data_type = FSNOTIFY_EVENT_PATH;
+		event->file = data;
+		/*
+		 * if this file is about to disappear hold an extra reference
+		 * until we return to __fput so we don't have to worry about
+		 * future get/put destroying the file under us or generating
+		 * additional events.  Notice that we change f_mode without
+		 * holding f_lock.  This is safe since this is the only possible
+		 * reference to this object in the kernel (it was about to be
+		 * freed, remember?)
+		 */
+		if (!atomic_long_read(&event->file->f_count)) {
+			event->file->f_mode |= FMODE_NONOTIFY;
+			get_file(event->file);
+		}
+		get_file(event->file);
 		break;
 	}
 	case FSNOTIFY_EVENT_INODE:
 		event->inode = data;
-		event->data_type = FSNOTIFY_EVENT_INODE;
 		break;
 	case FSNOTIFY_EVENT_NONE:
 		event->inode = NULL;
-		event->path.dentry = NULL;
-		event->path.mnt = NULL;
+		event->file = NULL;
 		break;
 	default:
 		BUG();
@@ -412,8 +463,11 @@ __init int fsnotify_notification_init(void)
 	fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
 	fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);

-	initialize_event(&q_overflow_event);
-	q_overflow_event.mask = FS_Q_OVERFLOW;
+	q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL,
+						 FSNOTIFY_EVENT_NONE, NULL, 0,
+						 GFP_KERNEL);
+	if (!q_overflow_event)
+		panic("unable to allocate fsnotify q_overflow_event\n");

 	return 0;
 }
@@ -29,6 +29,7 @@
 #include <linux/falloc.h>
 #include <linux/fs_struct.h>
 #include <linux/ima.h>
+#include <linux/dnotify.h>

 #include "internal.h"

@@ -887,7 +888,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
 			put_unused_fd(fd);
 			fd = PTR_ERR(f);
 		} else {
-			fsnotify_open(f->f_path.dentry);
+			fsnotify_open(f);
 			fd_install(fd, f);
 		}
 	}
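fsnotify_open() now takes the struct file itself, so the event can carry the file (and its path) instead of a bare dentry. The fsnotify.h side of the change is not shown on this page; it presumably ends up something like this sketch (helper signatures assumed from the surrounding kernel version, not confirmed by this diff):

static inline void fsnotify_open(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	__u32 mask = FS_OPEN;

	if (S_ISDIR(inode->i_mode))
		mask |= FS_IN_ISDIR;

	fsnotify_parent(file, NULL, mask);
	fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}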
@@ -210,6 +210,7 @@ unifdef-y += ethtool.h
 unifdef-y += eventpoll.h
 unifdef-y += signalfd.h
 unifdef-y += ext2_fs.h
+unifdef-y += fanotify.h
 unifdef-y += fb.h
 unifdef-y += fcntl.h
 unifdef-y += filter.h
(diffs for the remaining files in this merge are collapsed and not shown)