Commit 96d41019 authored by Jan Kara, committed by Linus Torvalds

fanotify: fix list corruption in fanotify_get_response()

fanotify_get_response() calls fsnotify_remove_event() when it finds that the
group is being released from fanotify_release() (bypass_perm is set).

However, the event it removes need not still be in the group's notification
queue: it may have already been moved to access_list (userspace read the event
before closing the fanotify instance fd), which is protected by a different
lock.  Thus when fsnotify_remove_event() races with fanotify_release()
operating on access_list, the list can get corrupted.
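
For reference, the userspace sequence described above looks roughly like the
following (an illustrative sketch only: the watched path is a placeholder,
error handling is minimal, and the program needs CAP_SYS_ADMIN to create a
fanotify group):

    /* fan_close_demo.c - read one permission event, answer it, then close the
     * fanotify fd while further permission events may still be pending */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/fanotify.h>

    int main(void)
    {
        struct fanotify_event_metadata buf[16];
        struct fanotify_response resp;
        ssize_t len;
        int fd;

        /* permission events require a FAN_CLASS_CONTENT group */
        fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);
        if (fd < 0) {
            perror("fanotify_init");
            return 1;
        }

        /* watch a directory for open-permission events ("/tmp" is a placeholder) */
        if (fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN_PERM | FAN_EVENT_ON_CHILD,
                          AT_FDCWD, "/tmp") < 0) {
            perror("fanotify_mark");
            return 1;
        }

        /*
         * Reading moves permission events from the group's notification queue
         * onto its access_list inside the kernel.
         */
        len = read(fd, buf, sizeof(buf));
        if (len >= (ssize_t)sizeof(buf[0]) && (buf[0].mask & FAN_OPEN_PERM)) {
            /* answer only the first event ... */
            resp.fd = buf[0].fd;
            resp.response = FAN_ALLOW;
            write(fd, &resp, sizeof(resp));
            close(buf[0].fd);
        }

        /*
         * ... then close the group while later permission events may still sit
         * on the notification queue or on access_list; cleaning those up is
         * what fanotify_release() has to get right.
         */
        close(fd);
        return 0;
    }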

Fix the problem by moving all the logic that removes permission events from
the lists to one place - fanotify_release().
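
For orientation, the release-time cleanup after this patch can be summarised
roughly as follows (condensed from the fanotify_user.c hunks below; the
access_list loop body is unchanged context that the diff view collapses, so it
is paraphrased here rather than quoted, and the fragment is not standalone
code):

    /* fanotify_release(): permission-event teardown, condensed */
    fsnotify_group_stop_queueing(group);    /* no new events can be queued now */

    /* events userspace already read: answer them on access_list */
    spin_lock(&group->fanotify_data.access_lock);
    list_for_each_entry_safe(event, next,
                             &group->fanotify_data.access_list, fae.fse.list) {
        list_del_init(&event->fae.fse.list);
        event->response = FAN_ALLOW;
    }
    spin_unlock(&group->fanotify_data.access_lock);

    /* events still queued: free notifications, answer permission events */
    mutex_lock(&group->notification_mutex);
    while (!fsnotify_notify_queue_is_empty(group)) {
        fsn_event = fsnotify_remove_first_event(group);
        if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
            fsnotify_destroy_event(group, fsn_event);
        else
            FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
    }
    mutex_unlock(&group->notification_mutex);

    /* every reader blocked in fanotify_get_response() now sees a response set */
    wake_up(&group->fanotify_data.access_waitq);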

Fixes: 5838d444 ("fanotify: fix double free of pending permission events")
Link: http://lkml.kernel.org/r/1473797711-14111-3-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reported-by: Miklos Szeredi <mszeredi@redhat.com>
Tested-by: Miklos Szeredi <mszeredi@redhat.com>
Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 12703dbf
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
 
         pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-        wait_event(group->fanotify_data.access_waitq, event->response ||
-                                atomic_read(&group->fanotify_data.bypass_perm));
-
-        if (!event->response) { /* bypass_perm set */
-                /*
-                 * Event was canceled because group is being destroyed. Remove
-                 * it from group's event list because we are responsible for
-                 * freeing the permission event.
-                 */
-                fsnotify_remove_event(group, &event->fae.fse);
-                return 0;
-        }
+        wait_event(group->fanotify_data.access_waitq, event->response);
 
         /* userspace responded, convert to something usable */
         switch (event->response) {

--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
         struct fanotify_perm_event_info *event, *next;
+        struct fsnotify_event *fsn_event;
 
         /*
-         * There may be still new events arriving in the notification queue
-         * but since userspace cannot use fanotify fd anymore, no event can
-         * enter or leave access_list by now.
+         * Stop new events from arriving in the notification queue. since
+         * userspace cannot use fanotify fd anymore, no event can enter or
+         * leave access_list by now either.
          */
-        spin_lock(&group->fanotify_data.access_lock);
-
-        atomic_inc(&group->fanotify_data.bypass_perm);
+        fsnotify_group_stop_queueing(group);
 
+        /*
+         * Process all permission events on access_list and notification queue
+         * and simulate reply from userspace.
+         */
+        spin_lock(&group->fanotify_data.access_lock);
         list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
                                  fae.fse.list) {
                 pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
         spin_unlock(&group->fanotify_data.access_lock);
 
         /*
-         * Since bypass_perm is set, newly queued events will not wait for
-         * access response. Wake up the already sleeping ones now.
-         * synchronize_srcu() in fsnotify_destroy_group() will wait for all
-         * processes sleeping in fanotify_handle_event() waiting for access
-         * response and thus also for all permission events to be freed.
+         * Destroy all non-permission events. For permission events just
+         * dequeue them and set the response. They will be freed once the
+         * response is consumed and fanotify_get_response() returns.
          */
+        mutex_lock(&group->notification_mutex);
+        while (!fsnotify_notify_queue_is_empty(group)) {
+                fsn_event = fsnotify_remove_first_event(group);
+                if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+                        fsnotify_destroy_event(group, fsn_event);
+                else
+                        FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+        }
+        mutex_unlock(&group->notification_mutex);
+
+        /* Response for all permission events it set, wakeup waiters */
         wake_up(&group->fanotify_data.access_waitq);
 #endif
 
@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
         spin_lock_init(&group->fanotify_data.access_lock);
         init_waitqueue_head(&group->fanotify_data.access_waitq);
         INIT_LIST_HEAD(&group->fanotify_data.access_list);
-        atomic_set(&group->fanotify_data.bypass_perm, 0);
 #endif
         switch (flags & FAN_ALL_CLASS_BITS) {
         case FAN_CLASS_NOTIF:

--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -131,21 +131,6 @@ int fsnotify_add_event(struct fsnotify_group *group,
         return ret;
 }
 
-/*
- * Remove @event from group's notification queue. It is the responsibility of
- * the caller to destroy the event.
- */
-void fsnotify_remove_event(struct fsnotify_group *group,
-                           struct fsnotify_event *event)
-{
-        mutex_lock(&group->notification_mutex);
-        if (!list_empty(&event->list)) {
-                list_del_init(&event->list);
-                group->q_len--;
-        }
-        mutex_unlock(&group->notification_mutex);
-}
-
 /*
  * Remove and return the first event from the notification list. It is the
  * responsibility of the caller to destroy the obtained event

--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -180,7 +180,6 @@ struct fsnotify_group {
                 spinlock_t access_lock;
                 struct list_head access_list;
                 wait_queue_head_t access_waitq;
-                atomic_t bypass_perm;
 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
                 int f_flags;
                 unsigned int max_marks;
@@ -307,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
                               struct fsnotify_event *event,
                               int (*merge)(struct list_head *,
                                            struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
 /* true if the group notification queue is empty */
 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
 /* return, but do not dequeue the first event on the notification queue */