Commit f7347ce4 authored by Linus Torvalds, committed by Arnd Bergmann

fasync: re-organize fasync entry insertion to allow it under a spinlock

You currently cannot use "fasync_helper()" in an atomic environment to
insert a new fasync entry, because it will need to allocate the new
"struct fasync_struct".

Yet fcntl_setlease() wants to call this under lock_flocks(), which is in
the process of being converted from the BKL to a spinlock.

To fix this, this patch abstracts the actual fasync list insertion and
the fasync allocations into functions of their own, and teaches
fs/locks.c to pre-allocate the fasync_struct entry.  That way the
actual list insertion can happen while holding the required spinlock.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bfields@redhat.com: rebase on top of my changes to Arnd's patch]
Tested-by: J. Bruce Fields <bfields@redhat.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Parent c5b1f0d9
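The pattern the patch introduces is worth spelling out before the diff: kmem_cache_alloc(..., GFP_KERNEL) may sleep, and sleeping is forbidden while holding a spinlock, so the allocation has to happen before the lock is taken, and the pre-allocated entry is either consumed by the insert or freed afterwards by the caller. A minimal sketch of that idiom follows; struct my_entry, my_list and my_add_entry are hypothetical stand-ins, not names from this patch:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Hypothetical list entry; stands in for struct fasync_struct. */
    struct my_entry {
            int fd;
            struct my_entry *next;
    };

    static struct my_entry *my_list;
    static DEFINE_SPINLOCK(my_lock);

    /* Pre-allocate outside the lock, insert under it, free if unused. */
    static int my_add_entry(int fd)
    {
            struct my_entry *new;

            new = kmalloc(sizeof(*new), GFP_KERNEL); /* may sleep: do it first */
            if (!new)
                    return -ENOMEM;

            spin_lock(&my_lock);           /* no sleeping allowed from here */
            if (!my_list || my_list->fd != fd) { /* stand-in "no entry yet" check */
                    new->fd = fd;
                    new->next = my_list;
                    my_list = new;         /* list took ownership of 'new' */
                    new = NULL;
            }
            spin_unlock(&my_lock);

            kfree(new);                    /* kfree(NULL) is a harmless no-op */
            return 0;
    }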
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
  * match the state "is the filp on a fasync list".
  *
  */
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 {
 	struct fasync_struct *fa, **fp;
 	int result = 0;
@@ -666,21 +666,28 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 	return result;
 }
 
+struct fasync_struct *fasync_alloc(void)
+{
+	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
 /*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
- *
- * NOTE! It is very important that the FASYNC flag always
- * match the state "is the filp on a fasync list".
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
  */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+void fasync_free(struct fasync_struct *new)
 {
-	struct fasync_struct *new, *fa, **fp;
-	int result = 0;
+	kmem_cache_free(fasync_cache, new);
+}
 
-	new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
-	if (!new)
-		return -ENOMEM;
+/*
+ * Insert a new entry into the fasync list.  Return the pointer to the
+ * old one if we didn't use the new one.
+ */
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
+{
+	struct fasync_struct *fa, **fp;
 
 	spin_lock(&filp->f_lock);
 	spin_lock(&fasync_lock);
@@ -691,8 +698,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
 		spin_lock_irq(&fa->fa_lock);
 		fa->fa_fd = fd;
 		spin_unlock_irq(&fa->fa_lock);
-
-		kmem_cache_free(fasync_cache, new);
 		goto out;
 	}
 
@@ -702,13 +707,42 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
 	new->fa_fd = fd;
 	new->fa_next = *fapp;
 	rcu_assign_pointer(*fapp, new);
-	result = 1;
 	filp->f_flags |= FASYNC;
 
 out:
 	spin_unlock(&fasync_lock);
 	spin_unlock(&filp->f_lock);
-	return result;
+	return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+	struct fasync_struct *new;
+
+	new = fasync_alloc();
+	if (!new)
+		return -ENOMEM;
+
+	/*
+	 * fasync_insert_entry() returns the old (update) entry if
+	 * it existed.
+	 *
+	 * So free the (unused) new entry and return 0 to let the
+	 * caller know that we didn't add any new fasync entries.
+	 */
+	if (fasync_insert_entry(fd, filp, fapp, new)) {
+		fasync_free(new);
+		return 0;
+	}
+
+	return 1;
 }
 
 /*
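The split above also creates an ownership rule callers must respect: fasync_free() is only valid for an entry that was never published on a list, because a published entry can still be referenced by RCU readers and must instead be torn down through fasync_remove_entry(), which defers the actual free (see fasync_free_rcu above). A hedged sketch of a caller honoring that contract; fd, filp and head are assumed to exist in the surrounding code:

    struct fasync_struct *old, *new;

    new = fasync_alloc();          /* GFP_KERNEL: must run outside any spinlock */
    if (!new)
            return -ENOMEM;

    /* Safe with other spinlocks held: fasync_insert_entry() allocates nothing. */
    old = fasync_insert_entry(fd, filp, &head, new);
    if (old) {
            /*
             * An entry for filp already existed and was updated in place,
             * so 'new' was never published and plain fasync_free() is fine.
             */
            fasync_free(new);
    }
    /*
     * Otherwise 'new' is now visible to RCU readers and may only be
     * released later via fasync_remove_entry(), never fasync_free().
     */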
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1505,6 +1505,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
 	struct file_lock *fl;
+	struct fasync_struct *new;
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	int error;
 
@@ -1512,12 +1513,25 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 	if (IS_ERR(fl))
 		return PTR_ERR(fl);
 
+	new = fasync_alloc();
+	if (!new) {
+		locks_free_lock(fl);
+		return -ENOMEM;
+	}
 	lock_flocks();
 	error = __vfs_setlease(filp, arg, &fl);
 	if (error || arg == F_UNLCK)
 		goto out_unlock;
 
-	error = fasync_helper(fd, filp, 1, &fl->fl_fasync);
+	/*
+	 * fasync_insert_entry() returns the old entry if any.
+	 * If there was no old entry, then it used 'new' and
+	 * inserted it into the fasync list. Clear new so that
+	 * we don't release it here.
+	 */
+	if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+		new = NULL;
+
 	if (error < 0) {
 		/* remove lease just inserted by setlease */
 		fl->fl_type = F_UNLCK | F_INPROGRESS;
@@ -1529,6 +1543,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 out_unlock:
 	unlock_flocks();
+	if (new)
+		fasync_free(new);
 	return error;
 }
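To see why the reordering in fcntl_setlease() is needed, compare the old and new call sequences once lock_flocks() becomes a spinlock rather than the BKL. This is a simplified contrast with error handling elided, not code from the patch:

    /* Old: fasync_helper() allocates with GFP_KERNEL inside the lock. */
    lock_flocks();
    error = fasync_helper(fd, filp, 1, &fl->fl_fasync); /* may sleep: bug */
    unlock_flocks();

    /* New: allocate first; only the list insertion runs under the lock. */
    new = fasync_alloc();               /* may sleep, but no lock held yet */
    lock_flocks();
    if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
            new = NULL;                 /* consumed by the list */
    unlock_flocks();
    if (new)
            fasync_free(new);           /* an existing entry was updated instead */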
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1302,6 +1302,11 @@ struct fasync_struct {
 
 /* SMP safe fasync helpers: */
 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
+extern int fasync_remove_entry(struct file *, struct fasync_struct **);
+extern struct fasync_struct *fasync_alloc(void);
+extern void fasync_free(struct fasync_struct *);
+
 /* can be called from interrupts */
 extern void kill_fasync(struct fasync_struct **, int, int);
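Since fasync_remove_entry() is now non-static and declared here, teardown can also be driven from outside fs/fcntl.c. A hypothetical release path; my_release and my_fasync_list are illustrative, not part of the patch:

    /* Hypothetical teardown using the newly exported helper. */
    static int my_release(struct inode *inode, struct file *filp)
    {
            /*
             * fasync_remove_entry() unlinks every entry for filp, clears
             * FASYNC, and frees via RCU; fasync_free() must not be used here.
             */
            fasync_remove_entry(filp, &my_fasync_list);
            return 0;
    }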